Diffstat (limited to 'tests/functional/snapshot')
 tests/functional/snapshot/test_256_snapshots.py | 212
 tests/functional/snapshot/test_activate_deactivate.py | 24
 tests/functional/snapshot/test_activate_on_create.py | 25
 tests/functional/snapshot/test_auto_delete.py | 231
 tests/functional/snapshot/test_clone_snap.py | 2
 tests/functional/snapshot/test_create_brick_down.py | 4
 tests/functional/snapshot/test_del_snap.py | 2
 tests/functional/snapshot/test_mount_snap.py | 38
 tests/functional/snapshot/test_restore_online_vol.py | 23
 tests/functional/snapshot/test_snap_clone_from_snap.py | 18
 tests/functional/snapshot/test_snap_delete_existing_scheduler.py | 133
 tests/functional/snapshot/test_snap_delete_multiple.py | 44
 tests/functional/snapshot/test_snap_delete_original_volume.py | 62
 tests/functional/snapshot/test_snap_delete_snap_of_volume.py | 18
 tests/functional/snapshot/test_snap_glusterd_down.py | 50
 tests/functional/snapshot/test_snap_info.py | 4
 tests/functional/snapshot/test_snap_info_glusterd_restart.py | 179
 tests/functional/snapshot/test_snap_invalid_cases.py | 4
 tests/functional/snapshot/test_snap_invalid_names.py | 6
 tests/functional/snapshot/test_snap_list_after_restart.py | 199
 tests/functional/snapshot/test_snap_rebalance.py | 21
 tests/functional/snapshot/test_snap_scheduler_status.py | 165
 tests/functional/snapshot/test_snap_self_heal.py | 32
 tests/functional/snapshot/test_snap_status_glusterd_restart.py | 162
 tests/functional/snapshot/test_snap_uss.py | 25
 tests/functional/snapshot/test_snap_uss_snapd.py | 377
 tests/functional/snapshot/test_snap_uss_while_io.py | 36
 tests/functional/snapshot/test_snapshot_create.py | 37
 tests/functional/snapshot/test_snapshot_restore.py | 49
 tests/functional/snapshot/test_uss_brick_down.py | 20
 tests/functional/snapshot/test_uss_snap_active_deactive.py | 20
 tests/functional/snapshot/test_uss_snap_restore.py | 239
 tests/functional/snapshot/test_validate_snap_del_gd_down.py | 2
 tests/functional/snapshot/test_validate_snap_scheduler.py | 2
 tests/functional/snapshot/test_validate_snaps_dir_over_uss.py | 46
 tests/functional/snapshot/test_validate_snapshot_max_limit.py | 21
 tests/functional/snapshot/test_validate_snapshot_rebalance.py | 21
 37 files changed, 1682 insertions(+), 871 deletions(-)
diff --git a/tests/functional/snapshot/test_256_snapshots.py b/tests/functional/snapshot/test_256_snapshots.py
index 9c5ceaf99..77aaae591 100644
--- a/tests/functional/snapshot/test_256_snapshots.py
+++ b/tests/functional/snapshot/test_256_snapshots.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2016-2017 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2016-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -14,48 +14,34 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-"""
-Description : The purpose of this test is to validate create snap>256
-"""
-
+from time import sleep
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.misc.misc_libs import upload_scripts
-from glustolibs.io.utils import validate_io_procs, get_mounts_stat
-from glustolibs.gluster.snap_ops import get_snap_list, snap_delete_all
+from glustolibs.io.utils import (
+ wait_for_io_to_complete,
+ get_mounts_stat)
+from glustolibs.gluster.snap_ops import (
+ snap_create,
+ set_snap_config,
+ get_snap_list,
+ snap_delete_all)
@runs_on([['distributed', 'replicated', 'distributed-replicated', 'dispersed',
'distributed-dispersed'], ['glusterfs', 'nfs', 'cifs']])
-class SanpCreate256(GlusterBaseClass):
- """
- Test for snapshot create for max 256
- Steps :
- 1. Create and start a volume
- 2. Mount the volume on a client
- 3. Perform some heavy IO
- 4. Varify IO
- 5. modify max snap limit to default to 256.
- 6. Create 256 snapshots
- 7. Varify 256 created successfully
- 8. Create 257th snapshot - check for failure
- -- it should fail.
- 9. Cleanup
-
- """
+class TestValidateSnaps256(GlusterBaseClass):
@classmethod
def setUpClass(cls):
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# Upload io scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts to clients %s" %
cls.clients)
@@ -66,107 +52,113 @@ class SanpCreate256(GlusterBaseClass):
"""
setUp method
"""
+ self.get_super_method(self, 'setUp')()
+
# Setup_Volume
- GlusterBaseClass.setUpClass.im_func(self)
- ret = self.setup_volume_and_mount_volume(mounts=self.mounts,
- volume_create_force=True)
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
if not ret:
raise ExecutionError("Failed to setup and mount volume")
g.log.info("Volume %s has been setup successfully", self.volname)
+ self.all_mounts_procs = []
+ self.snapshots = [('snap-test-validate-256-snapshots-%s-%s'
+ % (self.volname, i))for i in range(0, 256)]
+
def tearDown(self):
"""
tearDown
"""
+ self.get_super_method(self, 'tearDown')()
+
+ # Delete all snapshots
ret, _, _ = snap_delete_all(self.mnode)
- if not ret:
- raise ExecutionError("Failed to delete all snaps")
- GlusterBaseClass.tearDown.im_func(self)
+ if ret:
+ raise ExecutionError("Failed to delete all snapshots")
+ g.log.info("Successfully deleted all snapshots")
- @classmethod
- def tearDownClass(cls):
- """
- Clean up the volume & mount
- """
- g.log.info("Starting volume and mount cleanup")
- ret = cls.unmount_volume_and_cleanup_volume(cls.mounts)
+ # Unmount and cleanup volume
+ ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
if not ret:
raise ExecutionError("Failed to cleanup volume and mount")
g.log.info("Cleanup successful for the volume and mount")
- GlusterBaseClass.tearDownClass.im_func(cls)
-
def test_validate_snaps_256(self):
+ """
+ Validate snapshot creation for 256 snapshots
+
+ * Perform some IO
+ * Set snapshot config option snap-max-hard-limit to 256
+ * Create 256 snapshots
+ * Verify 256 created successfully
+ * Create 257th snapshot - creation should fail as it will
+ exceed the hard-limit
+ * Verify snapshot list for 256 snapshots
- # Start IO on all mounts.
- all_mounts_procs = []
- count = 1
- for mount_obj in self.mounts:
- g.log.info("Starting IO on %s:%s", mount_obj.client_system,
- mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
- "--dirname-start-num %d "
- "--dir-depth 2 "
- "--dir-length 10 "
- "--max-num-of-dirs 5 "
- "--num-of-files 5 %s" % (self.script_upload_path, count,
- mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- all_mounts_procs.append(proc)
- count = count + 10
-
- # Validate IO
- g.log.info("Validating IO's")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("Successfully validated all io's")
-
- # Get stat of all the files/dirs created.
- g.log.info("Get stat of all the files/dirs created.")
+ """
+ # pylint: disable=too-many-statements
+ # Start IO on all mounts
+ cmd = (
+ "/usr/bin/env python %s create_files "
+ "-f 10 --base-file-name firstfiles %s"
+ % (self.script_upload_path,
+ self.mounts[0].mountpoint))
+ proc = g.run_async(
+ self.mounts[0].client_system, cmd, user=self.mounts[0].user)
+ self.all_mounts_procs.append(proc)
+
+ # Wait for IO to complete
+ self.assertTrue(
+ wait_for_io_to_complete(self.all_mounts_procs, self.mounts[0]),
+ "IO failed on %s" % self.mounts[0])
+ g.log.info("IO is successful on all mounts")
+
+ # Perform stat on all the files/dirs created
ret = get_mounts_stat(self.mounts)
self.assertTrue(ret, "Stat failed on some of the clients")
- g.log.info("Successfully got stat of all files/dirs created")
-
- # set config for 256 snpas (to make sure to override)
- cmd_str = ("gluster snapshot config snap-max-hard-limit 256"
- " --mode=script")
- ret = g.run(self.mnode, cmd_str)
- self.assertTrue(ret, "Failed to set snap-max-hard-limit to 256.")
- g.log.info("snap-max-hard limit successfully set for 256.")
-
- # Create 256 snaps
- for i in range(1, 257, 1):
- cmd_str = "gluster snapshot create %s %s %s" % (
- "snapy%s" % i, self.volname, "no-timestamp")
- ret = g.run(self.mnode, cmd_str)
- self.assertTrue(ret, ("Failed to create snapshot for %s"
- % self.volname))
- g.log.info("Snapshot %s created successfully for volume %s",
- "snapy%s" % i, self.volname)
-
- # Check for no. of snaps using snap_list it should be 256
+ g.log.info("Successfully performed stat on all files/dirs created")
+
+ # Set config option snap-max-hard-limit to 256
+ # This is to make sure to override
+ max_hard_limit = {'snap-max-hard-limit': '256'}
+ ret, _, _ = set_snap_config(self.mnode, max_hard_limit)
+ self.assertEqual(ret, 0, "Failed to set snapshot config option "
+ "snap-max-hard-limit to 256")
+ g.log.info("Successfully set snapshot config option "
+ "snap-max-hard-limit to 256")
+
+ # Create 256 snapshots
+ for snapname in self.snapshots:
+ ret, _, _ = snap_create(self.mnode, self.volname, snapname)
+ self.assertEqual(ret, 0, ("Failed to create snapshot %s for %s"
+ % (snapname, self.volname)))
+ sleep(1)
+ g.log.info("Snapshots created successfully for volume %s",
+ self.volname)
+
+ # Validate snapshot list for 256 snapshots
snap_list = get_snap_list(self.mnode)
- self.assertTrue((len(snap_list) == 256), "No of snaps not consistent "
- "for volume %s" % self.volname)
- g.log.info("Successfully validated number of snaps.")
-
- # Validate all 256 snap names created during
- for i in range(1, 257, 1):
- self.assertTrue(("snapy%s" % i in snap_list), "%s snap not "
- "found " % ("snapy%s" % i))
- g.log.info("Successfully validated names of snap")
-
- # Try to create 257th snapshot
- cmd_str = "gluster snapshot create %s %s %s" % ("snap", self.volname,
- "no-timestamp")
- ret = g.run(self.mnode, cmd_str)
- self.assertEqual(ret, 1, ("Unexpected: Successfully created 'snap'"
- " for volume %s" % self.volname))
- g.log.info("Snapshot 'snap' not created as it is 257th snap")
-
- # Check for no. of snaps using snap_list it should be 256
+ self.assertTrue((len(snap_list) == 256), "Failed: Number of snapshots "
+ "is not consistent for volume %s" % self.volname)
+ g.log.info("Successfully validated number of snapshots")
+
+ # Validate snapshot existence using snap-name
+ for snapname in self.snapshots:
+ self.assertIn(snapname, snap_list,
+ "Failed: Snapshot %s not found" % snapname)
+ g.log.info("Successfully validated snapshots existence using "
+ "snap-name")
+
+ # Try to exceed snap-max-hard-limit by creating 257th snapshot
+ snap_257 = "snap-test-validate-256-snapshots-%s-257" % (self.volname)
+ ret, _, _ = snap_create(self.mnode, self.volname, snap_257)
+ self.assertEqual(
+ ret, 1, ("Unexpected: Successfully created %s for volume %s"
+ % (snap_257, self.volname)))
+ g.log.info("Snapshot %s not created as it exceeds the "
+ "snap-max-hard-limit", snap_257)
+
+ # Validate snapshot list for 256 snapshots
snap_list = get_snap_list(self.mnode)
- self.assertEqual(256, len(snap_list), "No of snaps not consistent "
- "for volume %s" % self.volname)
- g.log.info("Successfully validated number of snaps.")
+ self.assertEqual(len(snap_list), 256, "Failed: Number of snapshots "
+ "is not consistent for volume %s" % self.volname)
+ g.log.info("Successfully validated number of snapshots")
diff --git a/tests/functional/snapshot/test_activate_deactivate.py b/tests/functional/snapshot/test_activate_deactivate.py
index 8a75509cc..e3b46bb9c 100644
--- a/tests/functional/snapshot/test_activate_deactivate.py
+++ b/tests/functional/snapshot/test_activate_deactivate.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -41,7 +41,7 @@ class TestActivateDeactivate(GlusterBaseClass):
setup volume and initialize necessary variables
"""
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
g.log.info("Starting %s:", cls.__name__)
# Setup volume and mount
g.log.info("Starting to Setup Volume")
@@ -58,23 +58,15 @@ class TestActivateDeactivate(GlusterBaseClass):
if ret != 0:
raise ExecutionError("Snapshot Delete Failed")
g.log.info("Successfully deleted all snapshots")
- # Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
- @classmethod
- def tearDownClass(cls):
- """
- Clean up the volume & mount
- """
- # stopping the volume and clean up the volume
- g.log.info("Starting to Cleanup Volume")
- ret = cls.cleanup_volume()
+ # Cleanup-volume
+ ret = self.cleanup_volume()
if not ret:
- raise ExecutionError("Failed to Cleanup Volume and mount")
- g.log.info("Successful in Cleanup Volume and mount")
+ raise ExecutionError("Failed to Cleanup Volume")
+ g.log.info("Successful in Cleanup Volume")
- # calling GlusterBaseClass tearDownClass
- GlusterBaseClass.tearDownClass.im_func(cls)
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
def test_activate_deactivate(self):
# pylint: disable=too-many-branches, too-many-statements
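The GlusterBaseClass.setUpClass.im_func(cls) to
cls.get_super_method(cls, 'setUpClass')() substitution that recurs through
this patch is a Python 2/3 portability fix: im_func is an attribute of
Python 2 unbound methods and does not exist on Python 3, where unbound
methods are plain functions. A runnable illustration of the two spellings
(the class names here are made up; get_super_method itself is provided by
GlusterBaseClass):

    class Parent(object):
        @classmethod
        def setUpClass(cls):
            print("Parent.setUpClass running for", cls.__name__)

    class Child(Parent):
        @classmethod
        def setUpClass(cls):
            # Python 2-only spelling removed by this patch:
            #     Parent.setUpClass.im_func(cls)
            # Portable spelling, equivalent in effect to what
            # cls.get_super_method(cls, 'setUpClass')() resolves to:
            super(Child, cls).setUpClass()

    Child.setUpClass()   # -> Parent.setUpClass running for Child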
diff --git a/tests/functional/snapshot/test_activate_on_create.py b/tests/functional/snapshot/test_activate_on_create.py
index 3494bc332..939641b9c 100644
--- a/tests/functional/snapshot/test_activate_on_create.py
+++ b/tests/functional/snapshot/test_activate_on_create.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -38,7 +38,7 @@ class TestActivateOnCreate(GlusterBaseClass):
setup volume and initialize necessary variables
"""
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
g.log.info("Starting %s:", cls.__name__)
# Setup volume and mount
g.log.info("Starting to Setup Volume")
@@ -67,23 +67,14 @@ class TestActivateOnCreate(GlusterBaseClass):
g.log.info("set_snap_config Success to disable "
"activate-on-create")
- # Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
-
- @classmethod
- def tearDownClass(cls):
- """
- Clean up the volume & mount
- """
- # stopping the volume and clean up the volume
- g.log.info("Starting to Cleanup Volume")
- ret = cls.cleanup_volume()
+ # Cleanup-volume
+ ret = self.cleanup_volume()
if not ret:
- raise ExecutionError("Failed to Cleanup Volume and mount")
- g.log.info("Successful in Cleanup Volume and mount")
+ raise ExecutionError("Failed to Cleanup Volume")
+ g.log.info("Successful in Cleanup Volume")
- # calling GlusterBaseClass tearDownClass
- GlusterBaseClass.tearDownClass.im_func(cls)
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
def test_activate_on_create(self):
# pylint: disable=too-many-branches, too-many-statements
diff --git a/tests/functional/snapshot/test_auto_delete.py b/tests/functional/snapshot/test_auto_delete.py
index db8a50f0e..d1e934c02 100644
--- a/tests/functional/snapshot/test_auto_delete.py
+++ b/tests/functional/snapshot/test_auto_delete.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2016-2017 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2016-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -16,132 +16,147 @@
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
-from glustolibs.gluster.gluster_base_class import GlusterBaseClass
-from glustolibs.gluster.gluster_base_class import runs_on
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.snap_ops import (snap_create,
- snap_delete_all,
set_snap_config,
- get_snap_config)
+ get_snap_config,
+ get_snap_list)
@runs_on([['replicated', 'distributed-replicated', 'dispersed',
'distributed-dispersed', 'distributed'],
['glusterfs', 'nfs', 'cifs']])
-class DeleteSnapTests(GlusterBaseClass):
+class TestSnapAutoDelete(GlusterBaseClass):
"""
- DeleteSnapTests contains tests which verifies the deletion of
- snapshots
+ TestSnapAutoDelete contains tests which verifies the deletion of
+ snapshots along with the snapshot config option 'auto-delete'
"""
- def test_auto_delete_snap(self):
+ @classmethod
+ def setUpClass(cls):
"""
- * enabling auto-delete snapshot
- * Setting max-hard limit and max-soft-limit
- * Validating max-hard-limit and max-soft-limit
- * Verify the limits by creating another 20 snapshots
- * Oldest of newly created snapshots will be deleted
- * Retaining the latest 8(softlimit) snapshots
- * cleanup snapshots and volumes
+ setup volume and initialize necessary variables which is used in tests
"""
- # Setup volume
- ret = self.setup_volume()
+
+ # calling GlusterBaseClass setUpClass
+ cls.get_super_method(cls, 'setUpClass')()
+ # Setup Volume
+ ret = cls.setup_volume()
if not ret:
- raise ExecutionError("Failed to setup volume %s" % self.volname)
- g.log.info("Volume %s has been setup successfully", self.volname)
-
- # enabling auto-delete
- cmd = "gluster snapshot config auto-delete enable"
- ret = g.run(self.mnode, cmd)
- self.assertTrue(ret, ("Failed to enable auto-delete snapshot config"
- "option on volume % s" % self.volname))
- g.log.info("Snapshot auto-delete Successfully enabled")
-
- # setting max-hard-limit
- option = {'snap-max-hard-limit': '10'}
- ret = set_snap_config(self.mnode, option, self.volname)
- self.assertTrue(ret, ("Failed to set snap-max-hardlimit"
- "config option for volume %s" % self.volname))
- g.log.info("snap-max-hardlimit config option Successfully set for"
- "volume %s", self.volname)
+ raise ExecutionError("Failed to Setup_Volume %s" % cls.volname)
+ g.log.info("Successful in Setup Volume %s", cls.volname)
+ cls.autodel_enable = {'auto-delete': 'enable'}
- # Validating max-hard-limit
- hardlimit = get_snap_config(self.mnode)
- get_hardlimit = hardlimit['volumeConfig'][0]['hardLimit']
- if get_hardlimit != '10':
- self.assertTrue(ret, ("Failed to Validate max-hard-limit"))
- g.log.info("Successfully validated max-hard-limit")
-
- # setting max-soft-limit
- option = {'snap-max-soft-limit': '80'}
- ret = set_snap_config(self.mnode, option)
- self.assertTrue(ret, ("Failed to set snap-max-soft-limit"
- "config option"))
- g.log.info("snap-max-soft-limit config option Successfully set")
-
- # Validating max-soft-limit
- softlimit = get_snap_config(self.mnode)
- get_softlimit = softlimit['volumeConfig'][0]['softLimit']
- if get_softlimit != '8':
- self.assertTrue(ret, ("Failed to Validate max-soft-limit"))
- g.log.info("Successfully validated max-soft-limit")
-
- # creating 20 snapshots. As the count
- # of snapshots crosses the
- # soft-limit the oldest of newly created snapshot should
- # be deleted only latest 8 snapshots
- # should remain.
-
- # creating 20 more snapshots
- for snap_count in range(10, 30):
- ret = snap_create(self.mnode, self.volname, "snap%s"
- % snap_count, False,
- "This is the Description with $p3c1al"
- "characters!")
- self.assertTrue(ret, ("Failed to create snapshot snap%s for volume"
- "%s" % (snap_count, self.volname)))
- g.log.info("Snapshot snap%s of volume %s created successfully")
-
- # snapshot list to list total number of snaps after auto-delete
- cmd = "gluster snapshot list | wc -l"
- ret, out, _ = g.run(self.mnode, cmd)
- self.assertEqual(ret, 0, ("Failed to list snapshot of volume %s"
- % self.volname))
- g.log.info("Total number of snapshots created after auto-delete"
- "enabled is %s", out)
- if out != 8:
- g.log.info("Failed to validate snapshots with expected"
- "number of snapshots")
- g.log.info("Snapshot Validation Successful")
- g.log.info("Snapshot list command for volume %s was successful",
- self.volname)
+ def setUp(self):
+ """
+ Initialize necessary variables.
+ """
+ # Calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+
+ # Gather the default snapshot config option values
+ self.snap_conf = get_snap_config(self.mnode)
+ self.assertIsNotNone(
+ self.snap_conf, "Failed to get the snapshot config options")
+ softlim = self.snap_conf['systemConfig']['softLimit']
+ self.def_conf_softlim = {'snap-max-soft-limit': softlim[:-1]}
+ autodel = self.snap_conf['systemConfig']['autoDelete']
+ self.def_conf_autodel = {'auto-delete': autodel}
+ g.log.info("Successfully gathered the default snapshot config options")
+
+ self.snapshots = [('snap-test-snap-auto-delete-%s-%s'
+ % (self.volname, i))for i in range(0, 20)]
def tearDown(self):
# Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
-
- # disabling auto-delete
- cmd = "gluster snapshot config auto-delete disable"
- ret = g.run(self.mnode, cmd)
- self.assertTrue(ret, ("Failed to disable auto-delete snapshot"
- "config option"))
- g.log.info("Snapshot auto-delete Successfully disabled")
-
- # deleting created snapshots
- ret = snap_delete_all(self.mnode)
- self.assertTrue(ret, ("Failed to delete snapshot of volume"
- "%s" % self.volname))
- g.log.info("Successfully deleted snapshots of volume %s",
- self.volname)
+ self.get_super_method(self, 'tearDown')()
# setting back default max-soft-limit to 90%
- option = {'snap-max-soft-limit': '90'}
- ret = set_snap_config(self.mnode, option)
- self.assertTrue(ret, ("Failed to set snap-max-soft-limit"
- "config option"))
- g.log.info("snap-max-soft-limit config option Successfully set")
-
- # cleanup-volume
+ ret, _, _ = set_snap_config(self.mnode, self.def_conf_softlim)
+ if ret:
+ raise ExecutionError("Failed to set the default config options "
+ "for snap-max-soft-limit")
+ g.log.info("Successfully set the snapshot config options to default")
+
+ # setting back default value for auto-delete config option
+ ret, _, _ = set_snap_config(self.mnode, self.def_conf_autodel)
+ if ret:
+ raise ExecutionError("Failed to set the default config option for "
+ "auto-delete")
+ g.log.info("Successfully set the snapshot config options to default")
+
+ # Cleanup-volume
ret = self.cleanup_volume()
if not ret:
raise ExecutionError("Failed to Cleanup Volume")
- g.log.info("Cleanup volume %s Completed Successfully", self.volname)
+ g.log.info("Successful in Cleanup Volume")
+
+ def test_snap_auto_delete(self):
+ """
+ Verifying snapshot auto-delete config option
+
+ * Enable auto-delete snapshot
+ * Set snap-max-hard limit and snap-max-soft-limit
+ * Validate snap-max-hard-limit and snap-max-soft-limit
+ * Verify the limits by creating another 20 snapshots
+ * Oldest of newly created snapshots will be deleted
+ * Retaining the latest 8 (softlimit) snapshots
+ * Cleanup snapshots and volumes
+ """
+
+ # pylint: disable=too-many-statements
+ # Enable auto-delete snapshot config option
+ ret, _, _ = set_snap_config(self.mnode, self.autodel_enable)
+ self.assertEqual(ret, 0, ("Failed to enable auto-delete snapshot "
+ "config option on volume %s", self.volname))
+ g.log.info("Successfully enabled snapshot auto-delete")
+
+ # Set snap-max-hard-limit snapshot config option for volume
+ max_hard_limit = {'snap-max-hard-limit': '10'}
+ ret, _, _ = set_snap_config(self.mnode, max_hard_limit, self.volname)
+ self.assertEqual(ret, 0, ("Failed to set snap-max-hard-limit"
+ "config option for volume %s", self.volname))
+ g.log.info("Successfully set snap-max-hard-limit config option for"
+ "volume %s", self.volname)
+
+ # Validate snap-max-hard-limit snapshot config option
+ hard_limit_val = get_snap_config(self.mnode)
+ self.assertEqual(hard_limit_val['volumeConfig'][0]['hardLimit'], '10',
+ ("Failed to Validate snap-max-hard-limit"))
+ g.log.info("Successfully validated snap-max-hard-limit")
+
+ # Set snap-max-soft-limit snapshot config option
+ max_soft_limit = {'snap-max-soft-limit': '80'}
+ ret, _, _ = set_snap_config(self.mnode, max_soft_limit)
+ self.assertEqual(ret, 0, ("Failed to set snap-max-soft-limit"
+ "config option"))
+ g.log.info("Successfully set snap-max-soft-limit config option")
+
+ # Validate snap-max-soft-limit snapshot config option
+ soft_limit_val = get_snap_config(self.mnode)
+ self.assertEqual(soft_limit_val['volumeConfig'][0]['softLimit'], '8',
+ ("Failed to Validate max-soft-limit"))
+ g.log.info("Successfully validated snap-max-soft-limit")
+
+ # Create 20 snapshots. As the count of snapshots crosses the
+ # soft-limit the oldest of newly created snapshot should
+ # be deleted and only the latest 8 snapshots must remain.
+ for snapname in self.snapshots:
+ ret, _, _ = snap_create(self.mnode, self.volname, snapname,
+ description="This is the Description wit#"
+ " ($p3c1al) ch@r@cters!")
+ self.assertEqual(ret, 0, ("Failed to create snapshot %s for "
+ "volume %s", snapname, self.volname))
+ g.log.info("Snapshot snap%s of volume %s created successfully",
+ snapname, self.volname)
+
+ # Perform snapshot list to get total number of snaps after auto-delete
+ # Validate the existence of the snapshots using the snapname
+ snaplist = get_snap_list(self.mnode)
+ self.assertEqual(len(snaplist), 8,
+ ("Failed: The snapshot count is not as expected"))
+ for snapname in self.snapshots[-8:]:
+ self.assertIn(snapname, snaplist, "Failed to validate snapshot "
+ "existence for the snapshot %s" % snapname)
+ g.log.info("Successful in validating the Snapshot count and existence "
+ "by snapname")
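The pass/fail arithmetic here is worth spelling out: snap-max-soft-limit is
a percentage of snap-max-hard-limit, so with the values set in this hunk
the effective soft limit is 8 snapshots, which is why exactly the newest 8
of the 20 created snapshots survive auto-delete. A small sketch of the
calculation (values taken from the hunk above):

    # Effective soft limit = hard limit * soft-limit percentage.
    hard_limit = 10        # snap-max-hard-limit, set per volume above
    soft_limit_pct = 80    # snap-max-soft-limit, set system-wide above

    effective_soft_limit = hard_limit * soft_limit_pct // 100
    assert effective_soft_limit == 8   # matches softLimit reported as '8'

    # With auto-delete enabled, after creating 20 snapshots only the
    # newest `effective_soft_limit` remain -- hence the final check
    # against self.snapshots[-8:].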
diff --git a/tests/functional/snapshot/test_clone_snap.py b/tests/functional/snapshot/test_clone_snap.py
index 6311a282a..5129e35d3 100644
--- a/tests/functional/snapshot/test_clone_snap.py
+++ b/tests/functional/snapshot/test_clone_snap.py
@@ -48,7 +48,7 @@ class CloneSnapshot(GlusterBaseClass):
def setUp(self):
# SettingUp volume and Mounting the volume
- GlusterBaseClass.setUpClass.im_func(self)
+ self.get_super_method(self, 'setUp')()
g.log.info("Starting to SetUp Volume")
ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
if not ret:
diff --git a/tests/functional/snapshot/test_create_brick_down.py b/tests/functional/snapshot/test_create_brick_down.py
index 529c39a3d..60ace2bcf 100644
--- a/tests/functional/snapshot/test_create_brick_down.py
+++ b/tests/functional/snapshot/test_create_brick_down.py
@@ -45,7 +45,7 @@ class CreateSnapwhenBricksareDown(GlusterBaseClass):
"""
def setUp(self):
# SetUp volume and Mount volume
- GlusterBaseClass.setUpClass.im_func(self)
+ self.get_super_method(self, 'setUp')()
g.log.info("Starting to SetUp Volume")
ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
if not ret:
@@ -135,7 +135,7 @@ class CreateSnapwhenBricksareDown(GlusterBaseClass):
def tearDown(self):
# Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
# Unmount and cleanup-volume
g.log.info("Starting to Unmount and cleanup-volume")
diff --git a/tests/functional/snapshot/test_del_snap.py b/tests/functional/snapshot/test_del_snap.py
index c7868a314..01a71513f 100644
--- a/tests/functional/snapshot/test_del_snap.py
+++ b/tests/functional/snapshot/test_del_snap.py
@@ -35,7 +35,7 @@ class DeleteSnapshotTests(GlusterBaseClass):
"""
def setUp(self):
# SetUp volume and Mount volume
- GlusterBaseClass.setUpClass.im_func(self)
+ self.get_super_method(self, 'setUp')()
g.log.info("Starting to SetUp Volume and Mount Volume")
ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
if not ret:
diff --git a/tests/functional/snapshot/test_mount_snap.py b/tests/functional/snapshot/test_mount_snap.py
index 67b8a43cd..ef918ba8b 100644
--- a/tests/functional/snapshot/test_mount_snap.py
+++ b/tests/functional/snapshot/test_mount_snap.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -22,7 +22,9 @@
"""
import os
+
from glusto.core import Glusto as g
+
from glustolibs.gluster.exceptions import ConfigError, ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass
from glustolibs.gluster.gluster_base_class import runs_on
@@ -40,15 +42,13 @@ from glustolibs.gluster.mount_ops import create_mount_objs
class TestSnapMountSnapshot(GlusterBaseClass):
@classmethod
def setUpClass(cls):
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# Upload io scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts "
"to clients %s" % cls.clients)
@@ -63,7 +63,7 @@ class TestSnapMountSnapshot(GlusterBaseClass):
self.mount1 = []
self.mpoint = "/mnt/snap1"
# SettingUp volume and Mounting the volume
- GlusterBaseClass.setUpClass.im_func(self)
+ self.get_super_method(self, 'setUp')()
ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
if not ret:
raise ExecutionError("Failed to setup volume %s" % self.volname)
@@ -84,13 +84,17 @@ class TestSnapMountSnapshot(GlusterBaseClass):
g.log.info("Starting IO on all mounts...")
g.log.info("mounts: %s", self.mounts)
all_mounts_procs = []
+ self.counter = 1
for mount_obj in self.mounts:
- cmd = ("python %s create_files "
- "-f 10 --base-file-name file %s"
- % (self.script_upload_path, mount_obj.mountpoint))
+ cmd = ("/usr/bin/env python %s create_files "
+ "-f 10 --base-file-name file%d %s" % (
+ self.script_upload_path,
+ self.counter,
+ mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
all_mounts_procs.append(proc)
+ self.counter += 100
# Validate I/O
self.assertTrue(
@@ -149,9 +153,10 @@ class TestSnapMountSnapshot(GlusterBaseClass):
g.log.info("Starting IO on all mounts...")
all_mounts_procs = []
for mount_obj in self.mounts:
- cmd = ("python %s create_files "
- "-f 10 --base-file-name file %s"
- % (self.script_upload_path, mount_obj.mountpoint))
+ cmd = ("/usr/bin/env python %s create_files "
+ "-f 10 --base-file-name file %s" % (
+ self.script_upload_path,
+ mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
all_mounts_procs.append(proc)
@@ -167,9 +172,10 @@ class TestSnapMountSnapshot(GlusterBaseClass):
g.log.info("mounts: %s", self.mount1)
all_mounts_procs = []
for mount_obj in self.mount1:
- cmd = ("python %s create_files "
- "-f 10 --base-file-name file %s"
- % (self.script_upload_path, mount_obj.mountpoint))
+ cmd = ("/usr/bin/env python %s create_files "
+ "-f 10 --base-file-name file %s" % (
+ self.script_upload_path,
+ mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
all_mounts_procs.append(proc)
@@ -183,7 +189,7 @@ class TestSnapMountSnapshot(GlusterBaseClass):
def tearDown(self):
# Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
# unmounting volume from Custom mount point
g.log.info("UnMounting mount point %s", self.mpoint)
diff --git a/tests/functional/snapshot/test_restore_online_vol.py b/tests/functional/snapshot/test_restore_online_vol.py
index 023e9ead4..2fa46012b 100644
--- a/tests/functional/snapshot/test_restore_online_vol.py
+++ b/tests/functional/snapshot/test_restore_online_vol.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -20,7 +20,9 @@ Description:
When we try to restore online volume it should fail.
"""
+
from glusto.core import Glusto as g
+
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass
from glustolibs.gluster.gluster_base_class import runs_on
@@ -37,15 +39,14 @@ class SnapRSOnline(GlusterBaseClass):
@classmethod
def setUpClass(cls):
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
+
# Upload io scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts "
"to clients ")
@@ -54,7 +55,7 @@ class SnapRSOnline(GlusterBaseClass):
def setUp(self):
# SettingUp and Mounting the volume
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
g.log.info("Starting to SetUp Volume and mount volume")
ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
if not ret:
@@ -103,17 +104,17 @@ class SnapRSOnline(GlusterBaseClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 2 "
"--dir-length 2 "
"--max-num-of-dirs 2 "
- "--num-of-files 2 %s" % (self.script_upload_path,
- self.counter,
- mount_obj.mountpoint))
-
+ "--num-of-files 2 %s" % (
+ self.script_upload_path,
+ self.counter, mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
+ self.counter += 100
self.all_mounts_procs.append(proc)
self.io_validation_complete = False
diff --git a/tests/functional/snapshot/test_snap_clone_from_snap.py b/tests/functional/snapshot/test_snap_clone_from_snap.py
index 8b8e46eda..b976cdb41 100644
--- a/tests/functional/snapshot/test_snap_clone_from_snap.py
+++ b/tests/functional/snapshot/test_snap_clone_from_snap.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -22,6 +22,7 @@ Creation of clone volume from snapshot.
"""
from glusto.core import Glusto as g
+
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass
from glustolibs.gluster.gluster_base_class import runs_on
@@ -41,25 +42,23 @@ class SnapshotCloneValidate(GlusterBaseClass):
@classmethod
def setUpClass(cls):
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
cls.snap = "snap0"
# Upload io scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts "
"to clients ")
g.log.info("Successfully uploaded IO scripts to clients %s")
def setUp(self):
+ self.get_super_method(self, 'setUp')()
- GlusterBaseClass.setUpClass.im_func(self)
g.log.info("Starting to SetUp Volume")
ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
if not ret:
@@ -91,9 +90,10 @@ class SnapshotCloneValidate(GlusterBaseClass):
g.log.info("mounts: %s", self.mounts)
all_mounts_procs = []
for mount_obj in self.mounts:
- cmd = ("python %s create_files "
- "-f 10 --base-file-name file %s" % (self.script_upload_path,
- mount_obj.mountpoint))
+ cmd = ("/usr/bin/env python %s create_files "
+ "-f 10 --base-file-name file %s" % (
+ self.script_upload_path,
+ mount_obj.mountpoint))
proc = g.run(self.clients[0], cmd)
all_mounts_procs.append(proc)
g.log.info("Successfully Performed I/O on all mount points")
diff --git a/tests/functional/snapshot/test_snap_delete_existing_scheduler.py b/tests/functional/snapshot/test_snap_delete_existing_scheduler.py
index a321dcd39..19ad38e21 100644
--- a/tests/functional/snapshot/test_snap_delete_existing_scheduler.py
+++ b/tests/functional/snapshot/test_snap_delete_existing_scheduler.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -14,25 +14,17 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-"""
-Description:
+from time import sleep
- Test Cases in this module tests the
- snapshot scheduler behavior when shared volume is mounted/not
- mounted. scheduler command such as initialise scheduler,
- enable scheduler, status of scheduler.
-"""
-import time
from glusto.core import Glusto as g
+
from glustolibs.gluster.exceptions import ExecutionError
-from glustolibs.gluster.gluster_base_class import (GlusterBaseClass,
- runs_on)
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.misc.misc_libs import upload_scripts
from glustolibs.io.utils import validate_io_procs
from glustolibs.gluster.volume_ops import get_volume_info
from glustolibs.gluster.shared_storage_ops import (enable_shared_storage,
is_shared_volume_mounted,
- is_shared_volume_unmounted,
disable_shared_storage)
from glustolibs.gluster.snap_scheduler import (scheduler_enable,
scheduler_init,
@@ -50,15 +42,13 @@ class SnapshotDeleteExistingScheduler(GlusterBaseClass):
@classmethod
def setUpClass(cls):
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# Upload io scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts "
"to clients %s" % cls.clients)
@@ -68,7 +58,7 @@ class SnapshotDeleteExistingScheduler(GlusterBaseClass):
def setUp(self):
# SettingUp volume and Mounting the volume
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
g.log.info("Starting to SetUp and Mount Volume")
ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
if not ret:
@@ -84,7 +74,13 @@ class SnapshotDeleteExistingScheduler(GlusterBaseClass):
# Validate shared storage mounted
g.log.info("Starting to validate shared storage mounted")
- ret = is_shared_volume_mounted(self.mnode)
+ count = 0
+ while count < 5:
+ ret = is_shared_volume_mounted(self.mnode)
+ if ret:
+ break
+ sleep(2)
+ count += 1
if not ret:
raise ExecutionError("Failed to mount shared volume")
g.log.info("Successfully mounted shared volume")
@@ -107,27 +103,16 @@ class SnapshotDeleteExistingScheduler(GlusterBaseClass):
"on all nodes")
g.log.info("Successfully disabled snapshot scheduler on all nodes")
- # disable shared storage
- g.log.info("starting to disable shared storage")
- count = 0
- while count < 80:
+ # Check if shared storage is enabled
+ # Disable if true
+ g.log.info("Checking if shared storage is mounted")
+ ret = is_shared_volume_mounted(self.mnode)
+ if ret:
+ g.log.info("Disabling shared storage")
ret = disable_shared_storage(self.mnode)
- if ret:
- break
- time.sleep(2)
- count += 1
-
- if not ret:
- raise ExecutionError("Unexpected: Failed to disable "
- "shared storage")
- g.log.info("Expected: Successfully disabled shared storage")
-
- # Validate shared volume unmounted
- g.log.info("Validate shared volume unmounted")
- ret = is_shared_volume_unmounted(self.mnode)
- if not ret:
- raise ExecutionError("Failed to unmount shared storage")
- g.log.info("Successfully unmounted shared storage")
+ if not ret:
+ raise ExecutionError("Failed to disable shared storage")
+ g.log.info("Successfully disabled shared storage")
# Unmount and cleanup-volume
g.log.info("Starting to Unmount and cleanup-volume")
@@ -139,88 +124,90 @@ class SnapshotDeleteExistingScheduler(GlusterBaseClass):
def test_snap_delete_existing_scheduler(self):
# pylint: disable=too-many-statements
"""
+ Description:
+
+ Validating snapshot scheduler behavior when existing schedule
+ is deleted.
+
Steps:
- 1. enable shared volume
- 2. create a volume
- 3. initialise snap scheduler on all nodes
- 4. enable snap scheduler
- 5. check snap scheduler status
- 6. perform io on mounts
- 7. schedule a job of creating snapshot
- every 30 mins
- 8. list jobs created
- 9. delete scheduled job
- 10. validate io is successful
- 11. list job should not list
- any existing snapshot jobs
+ * Enable shared volume
+ * Create a volume
+ * Initialise snap_scheduler on all nodes
+ * Enable snap_scheduler
+ * Validate snap_scheduler status
+ * Perform IO on mounts
+ * Schedule a job of creating snapshot every 30 mins
+ * Perform snap_scheduler list
+ * Delete scheduled job
+ * Validate IO is successful
+ * Perform snap_scheduler list
"""
# Initialise snap scheduler
- g.log.info("Initialising snap scheduler on all servers")
+ g.log.info("Initialising snap_scheduler on all servers")
count = 0
while count < 80:
ret = scheduler_init(self.servers)
if ret:
break
- time.sleep(2)
+ sleep(2)
count += 1
self.assertTrue(ret, "Failed to initialise scheduler on all servers")
g.log.info("Successfully initialised scheduler on all servers")
# Enable snap scheduler
- g.log.info("Enabling snap scheduler")
+ g.log.info("Enabling snap_scheduler")
ret, _, _ = scheduler_enable(self.mnode)
self.assertEqual(ret, 0, "Failed to enable scheduler on node %s"
% self.mnode)
g.log.info("Successfully enabled scheduler on node %s", self.mnode)
- # Check snapshot scheduler status
- g.log.info("checking status of snapshot scheduler")
+ # Validate snapshot scheduler status
+ g.log.info("Validating status of snap_scheduler")
for server in self.servers:
count = 0
while count < 40:
ret, status, _ = scheduler_status(server)
if status.strip().split(":")[2] == ' Enabled':
break
- time.sleep(2)
+ sleep(2)
count += 2
self.assertEqual(status.strip().split(":")[2], ' Enabled',
- "Failed to check status of scheduler")
- g.log.info("Successfully checked scheduler status")
+ "Failed to validate status of scheduler")
+ g.log.info("Successfully validated scheduler status")
- # write files on all mounts
+ # Write files on all mounts
g.log.info("Starting IO on all mounts...")
all_mounts_procs = []
for mount_obj in self.mounts:
- cmd = ("python %s create_files "
- "-f 10 --base-file-name file %s" % (self.script_upload_path,
- mount_obj.mountpoint))
+ cmd = ("/usr/bin/env python %s create_files "
+ "-f 10 --base-file-name file %s" % (
+ self.script_upload_path,
+ mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
all_mounts_procs.append(proc)
- # add a job to schedule snapshot every 30 mins
+ # Add a job to schedule snapshot every 30 mins
g.log.info("Starting to add new job")
self.scheduler = r"*/30 * * * *"
self.job_name = "Job1"
ret, _, _ = scheduler_add_jobs(self.mnode, self.job_name,
- self.scheduler,
- self.volname)
+ self.scheduler, self.volname)
self.assertEqual(ret, 0, "Failed to add job")
g.log.info("Successfully added Job on volume %s", self.volname)
- # scheduler list
+ # Perform snap_scheduler list
g.log.info("Starting to list all scheduler jobs")
ret, _, _ = scheduler_list(self.mnode)
self.assertEqual(ret, 0, "Failed to list scheduler jobs")
g.log.info("Successfully listed all jobs")
- # delete scheduled job
+ # Delete scheduled job
g.log.info("Starting to delete scheduled jobs")
ret, _, _ = scheduler_delete(self.mnode, self.job_name)
self.assertEqual(ret, 0, "Failed to delete scheduled job")
- g.log.info("Successfully deleted scheduled job %s",
- self.job_name)
+ g.log.info("Successfully deleted scheduled job %s", self.job_name)
# Validate IO
self.assertTrue(
@@ -228,11 +215,11 @@ class SnapshotDeleteExistingScheduler(GlusterBaseClass):
"IO failed on some of the clients"
)
- # scheduler list (no active jobs should be there)
+ # Perform snap_scheduler list (no active jobs should be present)
g.log.info("Starting to list all scheduler jobs")
ret, out, _ = scheduler_list(self.mnode)
self.assertEqual(ret, 0, "Failed to list scheduler jobs")
ret1 = out.strip().split(":")
- self.assertEqual(ret1[1], " No snapshots scheduled", "Unexpected:"
- "Failed to delete scheduled job %s" % self.job_name)
+ self.assertEqual(ret1[1], " No snapshots scheduled", "Unexpected: "
+ "Jobs are getting listed even after being deleted")
g.log.info("Expected: No snapshots Jobs scheduled")
diff --git a/tests/functional/snapshot/test_snap_delete_multiple.py b/tests/functional/snapshot/test_snap_delete_multiple.py
index a728331a8..e1be4732f 100644
--- a/tests/functional/snapshot/test_snap_delete_multiple.py
+++ b/tests/functional/snapshot/test_snap_delete_multiple.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -22,7 +22,9 @@ Creation of clone from snapshot of volume.
"""
import os
+
from glusto.core import Glusto as g
+
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.volume_ops import volume_start
@@ -47,7 +49,7 @@ class SnapshotCloneDeleteMultiple(GlusterBaseClass):
@classmethod
def setUpClass(cls):
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
cls.snap1 = "snap1"
cls.snap2 = "snap21"
cls.clone1 = "clone1"
@@ -58,11 +60,9 @@ class SnapshotCloneDeleteMultiple(GlusterBaseClass):
# Upload io scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts "
"to clients ")
@@ -71,7 +71,7 @@ class SnapshotCloneDeleteMultiple(GlusterBaseClass):
def setUp(self):
# SettingUp volume and Mounting the volume
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
g.log.info("Starting to SetUp Volume and mount volume")
ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
if not ret:
@@ -99,7 +99,7 @@ class SnapshotCloneDeleteMultiple(GlusterBaseClass):
"""
# Perform I/O
- def io_operation():
+ def io_operation(name):
g.log.info("Starting to Perform I/O")
all_mounts_procs = []
for mount_obj in self.mounts:
@@ -107,9 +107,11 @@ class SnapshotCloneDeleteMultiple(GlusterBaseClass):
mount_obj.client_system, mount_obj.mountpoint)
# Create files
g.log.info('Creating files...')
- command = ("python %s create_files -f 100 --fixed-file-size"
- " 1k %s" % (self.script_upload_path,
- mount_obj.mountpoint))
+ fname = "{}-{}".format(mount_obj.client_system, name)
+ command = ("/usr/bin/env python {} create_files -f 100 "
+ "--fixed-file-size 1k --base-file-name {}"
+ " {}".format(self.script_upload_path,
+ fname, mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, command,
user=mount_obj.user)
all_mounts_procs.append(proc)
@@ -208,37 +210,37 @@ class SnapshotCloneDeleteMultiple(GlusterBaseClass):
g.log.info("Volume %s mounted on %s", clone, mpoint)
return 0
- value1 = range(0, 20)
- value2 = range(20, 30)
- value3 = range(30, 40)
+ value1 = list(range(0, 20))
+ value2 = list(range(20, 30))
+ value3 = list(range(30, 40))
ret1 = create_snap(value1, self.volname, self.snap1,
self.clone1, counter=20)
self.assertEqual(ret1, 30, "Failed")
ret2 = mount_clone_and_io(self.clone1, self.mpoint1)
self.assertEqual(ret2, 0, "Failed to mount volume")
- ret = io_operation()
+ ret = io_operation("first")
self.assertEqual(ret, 0, "Failed to perform io")
ret3 = create_snap(value2, self.clone1, self.snap2,
self.clone2, ret1)
self.assertEqual(ret3, 40, "Failed")
ret4 = mount_clone_and_io(self.clone2, self.mpoint2)
self.assertEqual(ret4, 0, "Failed to mount volume")
- ret = io_operation()
+ ret = io_operation("second")
self.assertEqual(ret, 0, "Failed to perform io")
ret1 = create_snap(value3, self.clone2, self.snap2,
self.clone2, ret3)
self.assertEqual(ret1, 0, "Failed to create snapshots")
+ def tearDown(self):
+ # Calling GlusterBaseClass teardown
+ self.get_super_method(self, 'tearDown')()
+
# delete created snapshots
g.log.info("starting to delete all created snapshots")
ret, _, _ = snap_delete_all(self.mnode)
self.assertEqual(ret, 0, "Failed to delete all snapshots")
g.log.info("Successfully deleted all snapshots")
- def tearDown(self):
- # Calling GlusterBaseClass teardown
- GlusterBaseClass.tearDown.im_func(self)
-
# Disable Activate on create
option = {'activate-on-create': 'disable'}
ret, _, _ = set_snap_config(self.mnode, option)
@@ -249,13 +251,13 @@ class SnapshotCloneDeleteMultiple(GlusterBaseClass):
# umount clone volume
g.log.info("Unmounting clone volume")
- ret, _, _ = umount_volume(self.mounts[0].client_system, self.mpoint1)
+ ret, _, _ = umount_volume(self.clients[0], self.mpoint1)
if ret != 0:
raise ExecutionError("Failed to unmount clone "
"volume %s" % self.clone1)
g.log.info("Successfully unmounted clone volume %s", self.clone1)
- ret, _, _ = umount_volume(self.mounts[0].client_system, self.mpoint2)
+ ret, _, _ = umount_volume(self.clients[0], self.mpoint2)
if ret != 0:
raise ExecutionError("Failed to unmount clone "
"volume %s" % self.clone2)
diff --git a/tests/functional/snapshot/test_snap_delete_original_volume.py b/tests/functional/snapshot/test_snap_delete_original_volume.py
index 1bf62fd87..249dab4fa 100644
--- a/tests/functional/snapshot/test_snap_delete_original_volume.py
+++ b/tests/functional/snapshot/test_snap_delete_original_volume.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -23,19 +23,22 @@ and delete snapshot and original volume.
Validate cloned volume is not affected.
"""
+from time import sleep
+
from glusto.core import Glusto as g
+
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.volume_libs import cleanup_volume
from glustolibs.gluster.volume_ops import (get_volume_info, volume_status,
volume_list, volume_start)
from glustolibs.io.utils import validate_io_procs
-from glustolibs.gluster.mount_ops import umount_volume
from glustolibs.gluster.snap_ops import (snap_create,
snap_list,
snap_activate,
snap_clone)
from glustolibs.misc.misc_libs import upload_scripts
+from glustolibs.gluster.mount_ops import umount_volume
@runs_on([['replicated', 'distributed-replicated', 'dispersed',
@@ -45,7 +48,7 @@ class SnapshotSelfheal(GlusterBaseClass):
@classmethod
def setUpClass(cls):
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
cls.clone = "clone1"
cls.mpoint = "/mnt/clone1"
@@ -53,11 +56,9 @@ class SnapshotSelfheal(GlusterBaseClass):
# Uploading file_dir script in all client direcotries
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts to clients")
g.log.info("Successfully uploaded IO scripts to clients ")
@@ -82,23 +83,23 @@ class SnapshotSelfheal(GlusterBaseClass):
"""
# Perform I/O
- g.log.info("Starting to Perform I/O")
all_mounts_procs = []
- for mount_obj in self.mounts:
- g.log.info("Generating data for %s:"
- "%s", mount_obj.client_system, mount_obj.mountpoint)
- # Create files
- g.log.info('Creating files...')
- command = ("python %s create_files -f 100 --fixed-file-size 1k %s"
- % (self.script_upload_path, mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, command,
- user=mount_obj.user)
- all_mounts_procs.append(proc)
+ g.log.info("Generating data for %s:"
+ "%s", self.mounts[0].client_system,
+ self.mounts[0].mountpoint)
+ # Create files
+ g.log.info('Creating files...')
+ command = ("/usr/bin/env python %s create_files -f 100 "
+ "--fixed-file-size 1k %s" % (self.script_upload_path,
+ self.mounts[0].mountpoint))
+ proc = g.run_async(self.mounts[0].client_system, command,
+ user=self.mounts[0].user)
+ all_mounts_procs.append(proc)
self.io_validation_complete = False
# Validate IO
self.assertTrue(
- validate_io_procs(all_mounts_procs, self.mounts),
+ validate_io_procs(all_mounts_procs, self.mounts[0]),
"IO failed on some of the clients"
)
self.io_validation_complete = True
@@ -138,6 +139,9 @@ class SnapshotSelfheal(GlusterBaseClass):
g.log.info("Clone Volume %s created successfully from snapshot "
"%s", self.clone, self.snap)
+ # After cloning a volume wait for 5 second to start the volume
+ sleep(5)
+
# Validate clone volumes are started:
g.log.info("starting to Validate clone volumes are started")
ret, _, _ = volume_start(self.mnode, self.clone)
@@ -145,6 +149,16 @@ class SnapshotSelfheal(GlusterBaseClass):
"%s" % self.clone))
g.log.info("Volume %s started successfully", self.clone)
+ for mount_obj in self.mounts:
+ # Unmount Volume
+ g.log.info("Starting to Unmount Volume %s", self.volname)
+ ret = umount_volume(mount_obj.client_system,
+ mount_obj.mountpoint,
+ mtype=self.mount_type)
+ self.assertTrue(ret,
+ ("Failed to Unmount Volume %s" % self.volname))
+ g.log.info("Successfully Unmounted Volume %s", self.volname)
+
# Delete original volume
g.log.info("deleting original volume")
ret = cleanup_volume(self.mnode, self.volname)
@@ -187,7 +201,7 @@ class SnapshotSelfheal(GlusterBaseClass):
def tearDown(self):
# Calling GlusterBaseClass teardown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
# Cleanup cloned volume
g.log.info("Starting to delete cloned volume")
@@ -195,13 +209,3 @@ class SnapshotSelfheal(GlusterBaseClass):
if not ret:
raise ExecutionError("Failed to delete the cloned volume")
g.log.info("Successful in deleting Cloned volume")
-
- # Unmount Volume
- g.log.info("Starting to Unmount Volume")
- for mount_obj in self.mounts:
- ret = umount_volume(mount_obj.client_system, self.mpoint,
- self.mount_type)
- if not ret:
- raise ExecutionError("Failed to umount the vol "
- "& cleanup Volume")
- g.log.info("Successful in umounting the volume and Cleanup")
diff --git a/tests/functional/snapshot/test_snap_delete_snap_of_volume.py b/tests/functional/snapshot/test_snap_delete_snap_of_volume.py
index fdac20047..afea29379 100644
--- a/tests/functional/snapshot/test_snap_delete_snap_of_volume.py
+++ b/tests/functional/snapshot/test_snap_delete_snap_of_volume.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -37,7 +37,7 @@ class SnapshotDeleteSnapVolume(GlusterBaseClass):
def setUp(self):
# SettingUp volume and Mounting the volume
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
if not ret:
raise ExecutionError("Failed to setup volume %s" % self.volname)
@@ -45,7 +45,13 @@ class SnapshotDeleteSnapVolume(GlusterBaseClass):
def tearDown(self):
# Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
+
+ # Delete all snapshots created
+ g.log.info("Deleting all snapshots created")
+ ret, _, _ = snap_delete_all(self.mnode)
+ if ret:
+ raise ExecutionError("Failed to delete snapshots")
+ g.log.info("All snapshots deleted successfully")
# Unmount and cleanup-volume
g.log.info("Unmount and cleanup-volume")
@@ -102,9 +108,3 @@ class SnapshotDeleteSnapVolume(GlusterBaseClass):
"%s" % (self.snap1, self.volname)))
g.log.info("Snapshot %s created successfully"
" for volume %s", self.snap1, self.volname)
-
- # delete all snapshot created
- g.log.info("Deleting all snapshots created")
- ret, _, _ = snap_delete_all(self.mnode)
- self.assertEqual(ret, 0, "Failed to delete snapshots")
- g.log.info("All Snapshots deleted successfully")
diff --git a/tests/functional/snapshot/test_snap_glusterd_down.py b/tests/functional/snapshot/test_snap_glusterd_down.py
index b6c030af9..d18dbe409 100644
--- a/tests/functional/snapshot/test_snap_glusterd_down.py
+++ b/tests/functional/snapshot/test_snap_glusterd_down.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -21,14 +21,13 @@ Test Cases in this module tests the
snapshot activation and deactivation status
when glusterd is down.
"""
-import time
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
-from glustolibs.gluster.peer_ops import is_peer_connected
+from glustolibs.gluster.peer_ops import wait_for_peers_to_connect
from glustolibs.gluster.gluster_init import (stop_glusterd,
start_glusterd,
- is_glusterd_running)
+ wait_for_glusterd_to_start)
from glustolibs.gluster.snap_ops import (snap_create,
get_snap_info_by_snapname,
get_snap_list, snap_deactivate,
@@ -42,12 +41,12 @@ class SnapshotGlusterddown(GlusterBaseClass):
@classmethod
def setUpClass(cls):
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
cls.snap = "snap1"
def setUp(self):
# SettingUp volume and Mounting the volume
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
g.log.info("Starting to SetUp Volume")
ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
if not ret:
@@ -121,15 +120,9 @@ class SnapshotGlusterddown(GlusterBaseClass):
# Check Glusterd status
g.log.info("Check glusterd running or not")
- count = 0
- while count < 80:
- ret = is_glusterd_running(self.servers[1])
- if ret == 1:
- break
- time.sleep(2)
- count += 2
- self.assertEqual(ret, 1, "Unexpected: glusterd running on node %s" %
- self.servers[1])
+ self.assertFalse(
+ wait_for_glusterd_to_start(self.servers[1]),
+ "glusterd is still running on %s" % self.servers[1])
g.log.info("Expected: Glusterd not running on node %s",
self.servers[1])
@@ -158,15 +151,9 @@ class SnapshotGlusterddown(GlusterBaseClass):
# Check Glusterd status
g.log.info("Check glusterd running or not")
- count = 0
- while count < 80:
- ret = is_glusterd_running(self.servers[1])
- if ret:
- break
- time.sleep(2)
- count += 2
- self.assertEqual(ret, 0, "glusterd not running on node %s "
- % self.servers[1])
+ self.assertTrue(
+ wait_for_glusterd_to_start(self.servers[1]),
+ "glusterd is still running on %s" % self.servers[1])
g.log.info("glusterd is running on %s node",
self.servers[1])
@@ -183,15 +170,9 @@ class SnapshotGlusterddown(GlusterBaseClass):
# Check all the peers are in connected state
g.log.info("Validating all the peers are in connected state")
- for servers in self.servers:
- count = 0
- while count < 80:
- ret = is_peer_connected(self.mnode, servers)
- if ret:
- break
- time.sleep(2)
- count += 2
- self.assertTrue(ret, "All the nodes are not in cluster")
+ self.assertTrue(
+ wait_for_peers_to_connect(self.mnode, self.servers),
+ "glusterd is still running on %s" % self.servers)
g.log.info("Successfully validated all the peers")
def tearDown(self):
@@ -202,3 +183,6 @@ class SnapshotGlusterddown(GlusterBaseClass):
if not ret:
raise ExecutionError("Failed to umount the vol & cleanup Volume")
g.log.info("Successful in umounting the volume and Cleanup")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
diff --git a/tests/functional/snapshot/test_snap_info.py b/tests/functional/snapshot/test_snap_info.py
index 004e88097..6afd8229d 100644
--- a/tests/functional/snapshot/test_snap_info.py
+++ b/tests/functional/snapshot/test_snap_info.py
@@ -38,14 +38,14 @@ class SnapshotInfo(GlusterBaseClass):
@classmethod
def setUpClass(cls):
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
cls.snap1 = "snap1"
cls.snap2 = "snap2"
def setUp(self):
# SettingUp volume and Mounting the volume
- GlusterBaseClass.setUpClass.im_func(self)
+ self.get_super_method(self, 'setUp')()
g.log.info("Starting to SetUp Volume")
ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
if not ret:
diff --git a/tests/functional/snapshot/test_snap_info_glusterd_restart.py b/tests/functional/snapshot/test_snap_info_glusterd_restart.py
index 13d9f0bb7..0d0c1253e 100644
--- a/tests/functional/snapshot/test_snap_info_glusterd_restart.py
+++ b/tests/functional/snapshot/test_snap_info_glusterd_restart.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -14,123 +14,128 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-"""
-Description:
-
-Test Cases in this module tests the
-snapshot information after glusterd
-is restarted.
-
-"""
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
-from glustolibs.gluster.gluster_base_class import GlusterBaseClass
-from glustolibs.gluster.gluster_base_class import runs_on
-from glustolibs.gluster.gluster_init import (restart_glusterd,
- is_glusterd_running)
-from glustolibs.gluster.snap_ops import (snap_create,
- get_snap_info,
- get_snap_info_by_volname,
- get_snap_info_by_snapname)
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.gluster_init import (
+ wait_for_glusterd_to_start,
+ restart_glusterd)
+from glustolibs.gluster.peer_ops import wait_for_peers_to_connect
+from glustolibs.gluster.snap_ops import (
+ snap_create,
+ get_snap_info,
+ get_snap_info_by_volname,
+ get_snap_info_by_snapname)
@runs_on([['replicated', 'distributed-replicated', 'dispersed',
'distributed', 'distributed-dispersed'],
- ['glusterfs', 'nfs', 'cifs']])
-class SnapshotInfo(GlusterBaseClass):
-
- @classmethod
- def setUpClass(cls):
- GlusterBaseClass.setUpClass.im_func(cls)
- cls.snap1 = "snap1"
- cls.snap2 = "snap2"
+ ['glusterfs']])
+class TestSnapshotInfoGlusterdRestart(GlusterBaseClass):
+ """
+ Test Cases in this module tests the snapshot information
+ after glusterd is restarted.
+ """
def setUp(self):
+ self.get_super_method(self, 'setUp')()
# SettingUp volume and Mounting the volume
- GlusterBaseClass.setUpClass.im_func(self)
- g.log.info("Starting to SetUp Volume")
- ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ ret = self.setup_volume()
if not ret:
raise ExecutionError("Failed to setup volume %s" % self.volname)
g.log.info("Volume %s has been setup successfully", self.volname)
+ self.snapshots = [('snap-test-snap-info-gd-restart-%s-%s'
+ % (self.volname, i)) for i in range(0, 2)]
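+ # e.g. with a (hypothetical) volname 'testvol' this yields
+ # ['snap-test-snap-info-gd-restart-testvol-0',
+ # 'snap-test-snap-info-gd-restart-testvol-1']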
def snapshot_info(self):
- # Check snapshot info using snap name
- g.log.info("Checking snapshot info using snap name")
- snap_info_chk = get_snap_info_by_snapname(self.mnode,
- self.snap1)
- self.assertIsNotNone(snap_info_chk, "Failed to get snap info")
- self.assertEqual(snap_info_chk['name'], "%s" % self.snap1,
- "Failed to show snapshot info for %s"
- % self.snap1)
- g.log.info("Successfully checked snapshot info for %s", self.snap1)
+ """
+ This section checks the snapshot information:
+ * Using snapname
+ * Using volname
+ * Without using snapname or volname
+ """
+ # Check snapshot info using snapname
+ for snap in self.snapshots:
+ snap_info_chk = get_snap_info_by_snapname(self.mnode, snap)
+ self.assertIsNotNone(snap_info_chk, "Failed to get snap info")
+ self.assertEqual(snap_info_chk['name'], "%s" % snap,
+ "Failed to show snapshot info for %s" % snap)
+ g.log.info("Successfully validated snapshot info for %s", snap)
# Check snapshot info using volname
- g.log.info("Checking snapshot info using volname")
snap_vol_info = get_snap_info_by_volname(self.mnode, self.volname)
self.assertIsNotNone(snap_vol_info, "Failed to get snap info")
- self.assertEqual(snap_vol_info['originVolume']['name'], "%s"
- % self.volname,
- "Failed to show snapshot info for %s"
- % self.volname)
- g.log.info("Successfully checked snapshot info for %s",
- self.volname)
-
- # Validate snapshot information
- g.log.info("Validating snapshot information")
- info_snaps = get_snap_info(self.mnode)
- self.assertIsNotNone(snap_vol_info, "Failed to get snap info")
- for snap in range(0, 2):
- self.assertEqual(info_snaps[snap]['name'], "snap%s" % snap,
- "Failed to validate"
- "snap information")
- g.log.info("Successfully Validated snap Information")
+ self.assertEqual(snap_vol_info['originVolume']['name'],
+ "%s" % self.volname,
+ "Failed to show snapshot info for %s" % self.volname)
+ g.log.info("Successfully validated snapshot info for %s", self.volname)
- def test_snap_info(self):
+ # Validate snapshot information without using snapname or volname
+ info_snaps = get_snap_info(self.mnode)
+ self.assertIsNotNone(info_snaps, "Failed to get snap info")
+ for counter, snap in enumerate(self.snapshots):
+ self.assertEqual(info_snaps[counter]['name'], snap,
+ "Failed to validate snap information")
+ g.log.info("Successfully validated snapshot information")
+
+ def test_snap_info_glusterd_restart(self):
"""
- 1. Create volumes
- 2. create multiple snapshots
- 3. Check snapshot info for snapshots created
- using snap name, using volume name and
- without using snap name and volume name
- 4. restart glusterd
- 5. follow step 3
+ Verify snapshot info before and after glusterd restart
+
+ * Create multiple snapshots
+ * Check snapshot info
+ - Without using snapname or volname
+ - Using snapname
+ - Using volname
+ * Restart glusterd on all servers
+ * Repeat the snapshot info step for all the three scenarios
+ mentioned above
"""
# pylint: disable=too-many-statements
- # Creating snapshot with description
- g.log.info("Starting to Create snapshot")
- for count in range(0, 2):
- self.snap = "snap%s" % count
- ret, _, _ = snap_create(self.mnode, self.volname,
- self.snap,
+ # Create snapshots with description
+ for snap in self.snapshots:
+ ret, _, _ = snap_create(self.mnode, self.volname, snap,
description='$p3C!@l C#@R@cT#R$')
self.assertEqual(ret, 0, ("Failed to create snapshot for volume %s"
% self.volname))
- g.log.info("Snapshot %s created successfully"
- " for volume %s", self.snap, self.volname)
- self.snapshot_info()
+ g.log.info("Snapshot %s created successfully for volume %s",
+ snap, self.volname)
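+ # (the description string deliberately carries shell special
+ # characters; it appears intended to exercise quoting in
+ # 'gluster snapshot create <snap> <vol> description <desc>')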
- # Restart Glusterd on all node
- g.log.info("Restarting Glusterd on all node")
- ret = restart_glusterd(self.servers)
- self.assertTrue(ret, "Failed to stop glusterd")
- g.log.info("Successfully stopped glusterd on all node")
-
- # Check Glusterd status
- g.log.info("Check glusterd running or not")
- ret = is_glusterd_running(self.servers)
- self.assertEqual(ret, 0, "glusterd running on node ")
- g.log.info("glusterd is not running")
+ # Perform the snapshot info tests before glusterd restart
+ self.snapshot_info()
+ # Restart Glusterd on all servers
+ for server in self.servers:
+ ret = restart_glusterd(server)
+ self.assertTrue(ret, ("Failed to restart glusterd on node %s"
+ % server))
+ g.log.info("Successfully restarted glusterd on node %s", server)
+
+ # Wait for glusterd to be online and validate glusterd running on all
+ # server nodes
+ self.assertTrue(
+ wait_for_glusterd_to_start(self.servers),
+ "Unexpected: glusterd not up on one or more of the nodes")
+ g.log.info("Glusterd is up and running on all nodes")
+
+ # Check if peers are connected
+ self.assertTrue(
+ wait_for_peers_to_connect(self.mnode, self.servers),
+ "Unexpected: Peers are not in connected state")
+ g.log.info("Successful: All peers are in connected state")
+
+ # Perform the snapshot info tests after glusterd restart
self.snapshot_info()
def tearDown(self):
+ self.get_super_method(self, 'tearDown')()
- # Unmount and cleanup original volume
- g.log.info("Starting to Unmount Volume and Cleanup Volume")
- ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
+ # Unmount and cleanup volume
+ ret = self.cleanup_volume()
if not ret:
- raise ExecutionError("Failed to umount the vol & cleanup Volume")
- g.log.info("Successful in umounting the volume and Cleanup")
+ raise ExecutionError("Failed to cleanup Volume")
+ g.log.info("Successful in Cleanup volume")
diff --git a/tests/functional/snapshot/test_snap_invalid_cases.py b/tests/functional/snapshot/test_snap_invalid_cases.py
index 6bbd8a20d..e8bdb033a 100644
--- a/tests/functional/snapshot/test_snap_invalid_cases.py
+++ b/tests/functional/snapshot/test_snap_invalid_cases.py
@@ -46,7 +46,7 @@ class SnapshotstatusInvalidcases(GlusterBaseClass):
@classmethod
def setUpClass(cls):
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
cls.snap5 = "snap5"
cls.snap1 = "snap1"
cls.volname1 = "volume1"
@@ -54,7 +54,7 @@ class SnapshotstatusInvalidcases(GlusterBaseClass):
def setUp(self):
# SettingUp volume and Mounting the volume
- GlusterBaseClass.setUpClass.im_func(self)
+ self.get_super_method(self, 'setUp')()
g.log.info("Starting to SetUp Volume")
ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
if not ret:
diff --git a/tests/functional/snapshot/test_snap_invalid_names.py b/tests/functional/snapshot/test_snap_invalid_names.py
index 775add5d8..a11d8ae07 100644
--- a/tests/functional/snapshot/test_snap_invalid_names.py
+++ b/tests/functional/snapshot/test_snap_invalid_names.py
@@ -35,14 +35,14 @@ class SnapshotInvalidNames(GlusterBaseClass):
@classmethod
def setUpClass(cls):
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
cls.snap1 = "snap1"
cls.snapinvalid = "#64^@*)"
cls.volname1 = "vola1"
def setUp(self):
# SettingUp volume and Mounting the volume
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
g.log.info("Starting to SetUp and Mount Volume")
ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
if not ret:
@@ -51,7 +51,7 @@ class SnapshotInvalidNames(GlusterBaseClass):
def tearDown(self):
# Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
# Unmount and cleanup-volume
g.log.info("Unmount and cleanup-volume")
diff --git a/tests/functional/snapshot/test_snap_list_after_restart.py b/tests/functional/snapshot/test_snap_list_after_restart.py
index 52f34c14a..cbac4b04a 100644
--- a/tests/functional/snapshot/test_snap_list_after_restart.py
+++ b/tests/functional/snapshot/test_snap_list_after_restart.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -14,116 +14,137 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-"""
-Description:
-
-Test Cases in this module tests the
-snapshot listing before and after
-glusterd restart.
-
-"""
-import time
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
-from glustolibs.gluster.gluster_init import (restart_glusterd,
- is_glusterd_running)
-from glustolibs.gluster.snap_ops import (snap_create, snap_delete,
- get_snap_list)
+from glustolibs.gluster.gluster_init import (
+ wait_for_glusterd_to_start,
+ restart_glusterd)
+from glustolibs.gluster.peer_ops import wait_for_peers_to_connect
+from glustolibs.gluster.snap_ops import (
+ snap_create,
+ snap_delete,
+ snap_delete_all,
+ get_snap_list)
@runs_on([['replicated', 'distributed-replicated', 'dispersed',
'distributed', 'distributed-dispersed'],
['glusterfs']])
-class SnapshotGlusterddown(GlusterBaseClass):
-
+class TestSnapshotListGlusterdRestart(GlusterBaseClass):
+ """
+ Test Cases in this module tests the snapshot listing
+ before and after glusterd restart.
+ """
def setUp(self):
# SettingUp volume and Mounting the volume
- GlusterBaseClass.setUp.im_func(self)
- g.log.info("Starting to SetUp Volume")
- ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ self.get_super_method(self, 'setUp')()
+ ret = self.setup_volume()
if not ret:
raise ExecutionError("Failed to setup volume %s" % self.volname)
g.log.info("Volume %s has been setup successfully", self.volname)
+ self.snapshots = [('snap-test-snap-list-gd-restart-%s-%s'
+ % (self.volname, i)) for i in range(0, 3)]
def tearDown(self):
- # Unmount and cleanup original volume
- g.log.info("Starting to Unmount Volume and Cleanup Volume")
- ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
- if not ret:
- raise ExecutionError("Failed to umount the vol & cleanup Volume")
- g.log.info("Successful in umounting the volume and Cleanup")
+ self.get_super_method(self, 'tearDown')()
- def test_snap_delete_and_list_glusterd_down(self):
- # pylint: disable=too-many-statements
+ # Delete snapshots created in the test case
+ ret, _, _ = snap_delete_all(self.mnode)
+ if ret:
+ raise ExecutionError("Failed to delete the snapshots")
+ g.log.info("Successfully deleted all snapshots")
+ # Unmount and cleanup volume
+ ret = self.cleanup_volume()
+ if not ret:
+ raise ExecutionError("Failed to cleanup Volume")
+ g.log.info("Successful in Cleanup volume")
+
+ def test_snap_list_glusterd_restart(self):
"""
- Steps:
-
- 1. create a volume
- 2. mount volume
- 3. create 3 snapshot of that volume
- 4. delete snapshot snap1
- 5. list all snapshots created
- 6. restart glusterd
- 7. list all snapshots created
- except snap1
+ Verify snapshot list before and after glusterd restart
+
+ * Create 3 snapshots of the volume
+ * Delete one snapshot
+ * List all snapshots created
+ * Restart glusterd on all nodes
+ * List all snapshots
+ All snapshots must be listed except the one that was deleted
"""
- # Creating snapshot:
- g.log.info("Starting to Create snapshot")
- for snap_count in range(0, 3):
- self.snap = "snap%s" % snap_count
- ret, _, _ = snap_create(self.mnode, self.volname, self.snap)
- self.assertEqual(ret, 0, ("Failed to create snapshot for "
- "volume %s" % self.volname))
+ # pylint: disable=too-many-statements
+ # Create snapshots
+ for snap in self.snapshots:
+ ret, _, _ = snap_create(self.mnode, self.volname, snap)
+ self.assertEqual(ret, 0, ("Failed to create snapshot %s for "
+ "volume %s" % (snap, self.volname)))
g.log.info("Snapshot %s created successfully "
- "for volume %s", self.snap, self.volname)
-
- # delete snap1 snapshot
- g.log.info("Starting to Delete snapshot snap1")
- ret, _, _ = snap_delete(self.mnode, "snap1")
- self.assertEqual(ret, 0, "Failed to delete"
- "snapshot snap1")
- g.log.info("Snapshots snap1 deleted Successfully")
-
- # snapshot list
- g.log.info("Starting to list all snapshots")
- out = get_snap_list(self.mnode)
- self.assertIsNotNone(out, "Failed to list all snapshots")
- self.assertEqual(len(out), 2, "Failed to validate snap list")
+ "for volume %s", snap, self.volname)
+
+ # List the snapshots and validate with snapname
+ snap_list = get_snap_list(self.mnode)
+ self.assertIsNotNone(snap_list, "Failed to list all snapshots")
+ self.assertEqual(len(snap_list), 3, "Failed to validate snap list")
+ g.log.info("Successfully validated snap list")
+ for snap in self.snapshots:
+ self.assertIn(snap, snap_list, "Failed to validate the snapshot "
+ "%s in the snapshot list" % snap)
+ g.log.info("Successfully validated the presence of snapshots using "
+ "snapname")
+
+ # Delete one snapshot
+ ret, _, _ = snap_delete(self.mnode, self.snapshots[0])
+ self.assertEqual(ret, 0, ("Failed to delete snapshot %s"
+ % self.snapshots[0]))
+ g.log.info("Snapshots %s deleted Successfully", self.snapshots[0])
+
+ # List the snapshots and validate with snapname
+ snap_list = get_snap_list(self.mnode)
+ self.assertIsNotNone(snap_list, "Failed to list all snapshots")
+ self.assertEqual(len(snap_list), 2, "Failed to validate snap list")
g.log.info("Successfully validated snap list")
+ for snap in self.snapshots[1:]:
+ self.assertIn(snap, snap_list, "Failed to validate the snapshot "
+ "%s in the snapshot list" % snap)
+ g.log.info("Successfully validated the presence of snapshots using "
+ "snapname")
- # restart Glusterd
- g.log.info("Restarting Glusterd on all nodes")
+ # Restart glusterd on all the servers
ret = restart_glusterd(self.servers)
- self.assertTrue(ret, "Failed to restart glusterd on nodes"
- "%s" % self.servers)
- g.log.info("Successfully restarted glusterd on nodes"
- " %s", self.servers)
-
- # check glusterd running
- g.log.info("Checking glusterd is running or not")
- count = 0
- while count < 80:
- ret = is_glusterd_running(self.servers)
- if ret == 0:
- break
- time.sleep(2)
- count += 1
-
- self.assertEqual(ret, 0, "Failed to validate glusterd "
- "running on nodes %s" % self.servers)
- g.log.info("glusterd is running on "
- "nodes %s", self.servers)
-
- # snapshot list
- g.log.info("Starting to list all snapshots")
- for server in self.servers[0:]:
- out = get_snap_list(server)
- self.assertIsNotNone(out, "Failed to list snap in node"
- "%s" % server)
- self.assertEqual(len(out), 2, "Failed to validate snap list"
- "on node %s" % server)
- g.log.info("Successfully validated snap list on node %s", server)
+ self.assertTrue(ret, ("Failed to restart glusterd on nodes %s"
+ % self.servers))
+ g.log.info("Successfully restarted glusterd on nodes %s", self.servers)
+
+ # Wait for glusterd to be online and validate glusterd running on all
+ # server nodes
+ self.assertTrue(
+ wait_for_glusterd_to_start(self.servers),
+ "Unexpected: glusterd not up on one or more of the nodes")
+ g.log.info("Glusterd is up and running on all nodes")
+
+ # Check if peers are connected
+ self.assertTrue(
+ wait_for_peers_to_connect(self.mnode, self.servers),
+ "Unexpected: Peers are not in connected state")
+ g.log.info("Successful: All peers are in connected state")
+
+ # List the snapshots after glusterd restart
+ # All snapshots must be listed except the one deleted
+ for server in self.servers:
+ snap_list = get_snap_list(server)
+ self.assertIsNotNone(
+ snap_list, "Failed to get the list of snapshots in node %s"
+ % server)
+ self.assertEqual(
+ len(snap_list), 2,
+ "Unexpected: Number of snapshots not consistent in the node %s"
+ % server)
+ g.log.info("Successfully validated snap list for node %s", server)
+ for snap in self.snapshots[1:]:
+ self.assertIn(
+ snap, snap_list, "Failed to validate the snapshot "
+ "%s in the snapshot list" % snap)
+ g.log.info("Successfully validated the presence of snapshots "
+ "using snapname for node %s", server)
diff --git a/tests/functional/snapshot/test_snap_rebalance.py b/tests/functional/snapshot/test_snap_rebalance.py
index 8b68cd1e9..8cbc18ca5 100644
--- a/tests/functional/snapshot/test_snap_rebalance.py
+++ b/tests/functional/snapshot/test_snap_rebalance.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -21,7 +21,9 @@ Test Cases in this module tests the
Creation of clone from snapshot of one volume.
"""
+
from glusto.core import Glusto as g
+
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.mount_ops import mount_volume, is_mounted
@@ -43,18 +45,16 @@ class SnapshotRebalance(GlusterBaseClass):
@classmethod
def setUpClass(cls):
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
cls.snap = "snap0"
cls.clone = "clone1"
cls.mount1 = "/mnt/clone1"
# Upload io scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts "
"to clients ")
@@ -101,7 +101,7 @@ class SnapshotRebalance(GlusterBaseClass):
def setUp(self):
# SetUp volume and Mounting the volume
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
g.log.info("Starting to SetUp Volume")
ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
if not ret:
@@ -174,9 +174,10 @@ class SnapshotRebalance(GlusterBaseClass):
# write files to mountpoint
g.log.info("Starting IO on %s mountpoint...", self.mount1)
all_mounts_procs = []
- cmd = ("python %s create_files "
- "-f 10 --base-file-name file %s" % (self.script_upload_path,
- self.mount1))
+ cmd = ("/usr/bin/env python %s create_files "
+ "-f 10 --base-file-name file %s" % (
+ self.script_upload_path,
+ self.mount1))
proc = g.run(self.clients[0], cmd)
all_mounts_procs.append(proc)
@@ -232,4 +233,4 @@ class SnapshotRebalance(GlusterBaseClass):
g.log.info("Successful in umounting the volume and Cleanup")
# Calling GlusterBaseClass teardown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
diff --git a/tests/functional/snapshot/test_snap_scheduler_status.py b/tests/functional/snapshot/test_snap_scheduler_status.py
index e8631232f..a403c7b50 100644
--- a/tests/functional/snapshot/test_snap_scheduler_status.py
+++ b/tests/functional/snapshot/test_snap_scheduler_status.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -13,18 +13,11 @@
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-"""
-Description:
- Test Cases in this module tests the
- snapshot scheduler behavior when shared volume is mounted/not
- mounted. scheduler command such as initialise scheduler,
- enable scheduler, status of scheduler.
-"""
-import time
+
+from time import sleep
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
-from glustolibs.gluster.gluster_base_class import (GlusterBaseClass,
- runs_on)
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.volume_ops import get_volume_options
from glustolibs.gluster.snap_scheduler import (scheduler_init,
scheduler_enable,
@@ -32,7 +25,6 @@ from glustolibs.gluster.snap_scheduler import (scheduler_init,
scheduler_disable)
from glustolibs.gluster.shared_storage_ops import (enable_shared_storage,
is_shared_volume_mounted,
- is_shared_volume_unmounted,
disable_shared_storage)
@@ -40,90 +32,143 @@ from glustolibs.gluster.shared_storage_ops import (enable_shared_storage,
'distributed', 'distributed-dispersed'],
['glusterfs']])
class SnapshotSchedulerStatus(GlusterBaseClass):
+ """
+ SnapshotSchedulerStatus includes tests which verify the snap_scheduler
+ functionality WRT the status and shared storage
+ """
def setUp(self):
+ """
+ setup volume for the test
+ """
# SettingUp volume and Mounting the volume
- GlusterBaseClass.setUp.im_func(self)
- g.log.info("Starting to SetUp and Mount Volume")
- ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ self.get_super_method(self, 'setUp')()
+ g.log.info("Starting to SetUp Volume")
+ ret = self.setup_volume()
if not ret:
raise ExecutionError("Failed to setup volume %s" % self.volname)
g.log.info("Volume %s has been setup successfully", self.volname)
def tearDown(self):
+ """
+ tearDown for every test
+ """
+
+ # disable snap scheduler
+ g.log.info("disabling snap scheduler")
+ ret, _, _ = scheduler_disable(self.mnode)
+ self.assertEqual(ret, 0, "Unexpected: Failed to disable "
+ "snapshot scheduler")
+ g.log.info("Successfully disabled snapshot scheduler")
+
+ # Check snapshot scheduler status
+ g.log.info("checking status of snapshot scheduler")
+ for server in self.servers:
+ count = 0
+ while count < 40:
+ ret, status, _ = scheduler_status(server)
+ status = status.strip().split(":")[2]
+ if not ret and status == ' Disabled':
+ break
+ sleep(2)
+ count += 1
+ self.assertEqual(ret, 0, "Failed to check status of scheduler"
+ " on node %s" % server)
+ g.log.info("Successfully checked scheduler status on %s nodes",
+ server)
+
+ # Check if shared storage is enabled
+ # Disable if true
+ g.log.info("Checking if shared storage is mounted")
+ ret = is_shared_volume_mounted(self.mnode)
+ if ret:
+ g.log.info("Disabling shared storage")
+ ret = disable_shared_storage(self.mnode)
+ if not ret:
+ raise ExecutionError("Failed to disable shared storage")
+ g.log.info("Successfully disabled shared storage")
# Unmount and cleanup-volume
- g.log.info("Starting to Unmount and cleanup-volume")
- ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
+ g.log.info("Starting to cleanup-volume")
+ ret = self.cleanup_volume()
if not ret:
- raise ExecutionError("Failed to Unmount and Cleanup Volume")
- g.log.info("Successful in Unmount Volume and Cleanup Volume")
+ raise ExecutionError("Failed to Cleanup Volume %s" % self.volname)
+ g.log.info("Successful in Cleanup Volume")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
def test_snap_scheduler_status(self):
# pylint: disable=too-many-statements
"""
- Steps:
- 1. create volumes
- 2. initialise snap scheduler without
- enabling shared storage should fail
- 3. enable shared storage
- 4. initialise snap scheduler
- 5. check snapshot scheduler status
+ Validating the snapshot scheduler behavior when shared storage
+ volume is mounted/not mounted.
+
+ * Initialise snap_scheduler without enabling shared storage
+ * Enable shared storage
+ * Initialise snap_scheduler on all nodes
+ * Check snap_scheduler status
"""
- # Validate shared storage is enabled
- g.log.info("Starting to validate shared storage volume")
+
+ # Validate shared storage is disabled
+ g.log.info("Validating shared storage is disabled")
volinfo = get_volume_options(self.mnode, self.volname,
- option=("cluster.enable"
- "-shared-storage"))
+ option=("cluster.enable-shared-storage"))
if volinfo["cluster.enable-shared-storage"] == "disable":
# Initialise snapshot scheduler
g.log.info("Initialising snapshot scheduler on all nodes")
ret = scheduler_init(self.servers)
self.assertFalse(ret, "Unexpected: Successfully initialized "
"scheduler on all nodes")
- g.log.info("As Expected, Failed to initialize scheduler on "
+ g.log.info("Expected: Failed to initialize snap_scheduler on "
"all nodes")
self.assertEqual(volinfo["cluster.enable-shared-storage"],
"disable", "Unexpected: Shared storage "
"is enabled on cluster")
- # Enable Shared storage
+ # Enable shared storage
g.log.info("enabling shared storage")
ret = enable_shared_storage(self.mnode)
self.assertTrue(ret, "Failed to enable shared storage")
g.log.info("Successfully enabled shared storage")
- # Validate shared storage mounted
- g.log.info("validate shared storage mounted")
- ret = is_shared_volume_mounted(self.mnode)
- self.assertTrue(ret, "Failed to mount shared volume")
- g.log.info("Successfully mounted shared volume")
+ # Validate shared storage volume is mounted
+ g.log.info("Validating if shared storage volume is mounted")
+ count = 0
+ while count < 5:
+ ret = is_shared_volume_mounted(self.mnode)
+ if ret:
+ break
+ sleep(2)
+ count += 1
+ self.assertTrue(ret, "Failed to validate if shared volume is mounted")
+ g.log.info("Successfully validated shared volume is mounted")
# Validate shared storage volume is enabled
- g.log.info("validate shared storage volume")
+ g.log.info("Validate shared storage is enabled")
volinfo = get_volume_options(self.mnode, self.volname,
- option=("cluster.enable"
- "-shared-storage"))
+ option=("cluster.enable-shared-storage"))
self.assertIsNotNone(volinfo, "Failed to validate volume option")
self.assertEqual(volinfo["cluster.enable-shared-storage"], "enable",
- "Failed to enable shared storage volume")
- g.log.info("Shared storage enabled successfully")
+ "Failed to validate if shared storage is enabled")
+ g.log.info("Successfully validated shared storage is enabled")
- # Initialise snap scheduler
+ # Initialise snap_scheduler on all nodes
g.log.info("Initialising snapshot scheduler on all nodes")
count = 0
+ sleep(2)
while count < 40:
ret = scheduler_init(self.servers)
if ret:
break
- time.sleep(2)
+ sleep(2)
count += 1
self.assertTrue(ret, "Failed to initialize scheduler on all nodes")
g.log.info("Successfully initialized scheduler on all nodes")
- # Enable snap scheduler
- g.log.info("Enabling snap scheduler")
+ # Enable snap_scheduler
+ g.log.info("Enabling snap_scheduler")
ret, _, _ = scheduler_enable(self.mnode)
self.assertEqual(ret, 0, "Failed to enable scheduler on %s node" %
self.mnode)
@@ -135,32 +180,12 @@ class SnapshotSchedulerStatus(GlusterBaseClass):
count = 0
while count < 40:
ret, status, _ = scheduler_status(server)
- if ret == 0:
- self.assertEqual(status.strip().split(":")[2], ' Enabled',
- "Failed to check status of scheduler")
+ status = status.strip().split(":")[2]
+ if ret == 0 and status == ' Enabled':
break
- time.sleep(2)
+ sleep(2)
count += 1
self.assertEqual(ret, 0, "Failed to check status of scheduler"
- " on nodes %s" % server)
+ " on node %s" % server)
g.log.info("Successfully checked scheduler status on %s nodes",
server)
-
- # disable snap scheduler
- g.log.info("disabling snap scheduler")
- ret, _, _ = scheduler_disable(self.mnode)
- self.assertEqual(ret, 0, "Unexpected: Failed to disable "
- "snapshot scheduler")
- g.log.info("Successfully disabled snapshot scheduler")
-
- # disable shared storage
- g.log.info("starting to disable shared storage")
- ret = disable_shared_storage(self.mnode)
- self.assertTrue(ret, "Failed to disable shared storage")
- g.log.info("Successfully disabled shared storage")
-
- # Validate shared volume unmounted
- g.log.info("Validate shared volume unmounted")
- ret = is_shared_volume_unmounted(self.mnode)
- self.assertTrue(ret, "Failed to unmount shared storage")
- g.log.info("Successfully unmounted shared storage")
diff --git a/tests/functional/snapshot/test_snap_self_heal.py b/tests/functional/snapshot/test_snap_self_heal.py
index 10f8a531a..9cc6d8298 100644
--- a/tests/functional/snapshot/test_snap_self_heal.py
+++ b/tests/functional/snapshot/test_snap_self_heal.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -21,7 +21,9 @@ Test Cases in this module tests the
Creation of clone from snapshot of volume.
"""
+
from glusto.core import Glusto as g
+
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.mount_ops import (mount_volume, umount_volume,
@@ -50,7 +52,7 @@ class SnapshotSelfheal(GlusterBaseClass):
@classmethod
def setUpClass(cls):
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
cls.snap = "snap1"
cls.clone = "clone1"
cls.mount1 = "/mnt/clone1"
@@ -60,11 +62,9 @@ class SnapshotSelfheal(GlusterBaseClass):
# Uploading file_dir script in all client direcotries
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", self.clients[0])
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
self.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(self.clients[0], script_local_path)
+ ret = upload_scripts(self.clients[0], self.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts to clients %s" %
self.clients[0])
@@ -72,7 +72,7 @@ class SnapshotSelfheal(GlusterBaseClass):
self.clients[0])
# SettingUp volume and Mounting the volume
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
g.log.info("Starting to SetUp Volume")
ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
if not ret:
@@ -149,9 +149,10 @@ class SnapshotSelfheal(GlusterBaseClass):
g.log.info("Starting IO on all mounts...")
g.log.info("mounts: %s", self.mount1)
all_mounts_procs = []
- cmd = ("python %s create_files "
- "-f 10 --base-file-name file %s"
- % (self.script_upload_path, self.mount1))
+ cmd = ("/usr/bin/env python %s create_files "
+ "-f 10 --base-file-name file %s" % (
+ self.script_upload_path,
+ self.mount1))
proc = g.run(self.clients[0], cmd)
all_mounts_procs.append(proc)
g.log.info("Successful in creating I/O on mounts")
@@ -165,10 +166,8 @@ class SnapshotSelfheal(GlusterBaseClass):
g.log.info("Starting to bring bricks to offline")
bricks_to_bring_offline_dict = (select_bricks_to_bring_offline(
self.mnode, self.volname))
- bricks_to_bring_offline = filter(None, (
- bricks_to_bring_offline_dict['hot_tier_bricks'] +
- bricks_to_bring_offline_dict['cold_tier_bricks'] +
- bricks_to_bring_offline_dict['volume_bricks']))
+ bricks_to_bring_offline = bricks_to_bring_offline_dict['volume_bricks']
+
g.log.info("Brick to bring offline: %s ", bricks_to_bring_offline)
ret = bring_bricks_offline(self.clone, bricks_to_bring_offline)
self.assertTrue(ret, "Failed to bring the bricks offline")
@@ -195,9 +194,10 @@ class SnapshotSelfheal(GlusterBaseClass):
g.log.info("Starting IO on all mounts...")
g.log.info("mounts: %s", self.mount1)
all_mounts_procs = []
- cmd = ("python %s create_files "
- "-f 10 --base-file-name file %s" % (self.script_upload_path,
- self.mount1))
+ cmd = ("/usr/bin/env python %s create_files "
+ "-f 10 --base-file-name file %s" % (
+ self.script_upload_path,
+ self.mount1))
proc = g.run(self.clients[0], cmd)
all_mounts_procs.append(proc)
g.log.info("Successful in creating I/O on mounts")
diff --git a/tests/functional/snapshot/test_snap_status_glusterd_restart.py b/tests/functional/snapshot/test_snap_status_glusterd_restart.py
new file mode 100644
index 000000000..a378737f7
--- /dev/null
+++ b/tests/functional/snapshot/test_snap_status_glusterd_restart.py
@@ -0,0 +1,162 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Description:
+
+Test Cases in this module test the
+snapshot status when glusterd is restarted.
+
+"""
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass
+from glustolibs.gluster.gluster_base_class import runs_on
+from glustolibs.gluster.gluster_init import (restart_glusterd,
+ wait_for_glusterd_to_start)
+from glustolibs.gluster.snap_ops import (snap_create,
+ get_snap_status,
+ get_snap_status_by_snapname,
+ snap_status_by_volname)
+
+
+@runs_on([['replicated', 'distributed-replicated', 'dispersed',
+ 'distributed', 'distributed-dispersed'],
+ ['glusterfs']])
+class TestSnapshotGlusterdRestart(GlusterBaseClass):
+
+ @classmethod
+ def setUpClass(cls):
+ cls.get_super_method(cls, 'setUpClass')()
+ cls.snapshots = [('snap-test-snap-status-gd-restart-%s-%s'
+ % (cls.volname, i)) for i in range(0, 2)]
+
+ def setUp(self):
+
+ # SettingUp volume and Mounting the volume
+ self.get_super_method(self, 'setUp')()
+
+ ret = self.setup_volume()
+ if not ret:
+ raise ExecutionError("Failed to setup volume %s" % self.volname)
+ g.log.info("Volume %s has been setup successfully", self.volname)
+
+ def tearDown(self):
+
+ # Cleanup volume
+ ret = self.cleanup_volume()
+ if not ret:
+ raise ExecutionError("Failed to cleanup Volume")
+ g.log.info("Successful in Cleanup Volume")
+
+ def test_snap_status_glusterd_restart(self):
+ # pylint: disable=too-many-statements, too-many-branches
+ """
+ Test Case:
+ 1. Create volume
+ 2. Create two snapshots with description
+ 3. Check snapshot status information using snapname, volume name and
+ without snapname/volname.
+ 4. Restart glusterd on all nodes
+ 5. Follow step 3 again and validate the snapshots
+ """
+
+ # Creating snapshot with description
+ for snap in self.snapshots:
+ ret, _, _ = snap_create(self.mnode, self.volname, snap,
+ description='$p3C!@l C#@R@cT#R$')
+ self.assertEqual(ret, 0, ("Failed to create snapshot for volume %s"
+ % self.volname))
+ g.log.info("Snapshot %s created successfully"
+ " for volume %s", snap, self.volname)
+
+ # Validate snapshot status information
+ # Check snapshot status
+ snap_stat = get_snap_status(self.mnode)
+ self.assertIsNotNone(snap_stat, "failed to get snap status")
+ for snap_count, snap in enumerate(self.snapshots):
+ self.assertEqual(snap_stat[snap_count]['name'],
+ snap, "Failed to show snapshot status")
+ g.log.info("Successfully checked snapshot status")
+
+ # Check snapshot status using snap name
+ snap_status = get_snap_status_by_snapname(self.mnode,
+ self.snapshots[0])
+ self.assertIsNotNone(snap_status, "failed to get snap status")
+ self.assertEqual(snap_status['name'], "%s" % self.snapshots[0],
+ "Failed to show snapshot "
+ "status for %s" % self.snapshots[0])
+ g.log.info("Successfully checked snapshot status for %s",
+ self.snapshots[0])
+
+ # Check snapshot status using volname
+ ret, snap_vol_status, _ = snap_status_by_volname(self.mnode,
+ self.volname)
+ self.assertEqual(ret, 0, ("Failed to get snapshot statue "
+ "by volume name"))
+ self.assertIsNotNone(snap_vol_status, "failed to get snap status")
+ for snap in self.snapshots:
+ self.assertIn(snap, snap_vol_status,
+ "Failed to validate snapshot name")
+ g.log.info("Successfully validated snapshot status for %s",
+ self.volname)
+
+ # Restart glusterd on all nodes
+ ret = restart_glusterd(self.servers)
+ self.assertTrue(ret, "Failed to restart glusterd")
+ g.log.info("Successfully restarted glusterd on all nodes")
+
+ # Wait for glusterd to be online on all nodes
+ ret = wait_for_glusterd_to_start(self.servers)
+ self.assertTrue(ret, "glusterd is not running on one or more nodes")
+ g.log.info("glusterd is running on all nodes")
+
+ # Validate snapshot status information
+ # Check snapshot status
+ snap_stat = get_snap_status(self.mnode)
+ self.assertIsNotNone(snap_stat, "failed to get snap status")
+ for snap_count, snap in enumerate(self.snapshots):
+ self.assertEqual(snap_stat[snap_count]['name'],
+ snap, "Failed to show snapshot status")
+ g.log.info("Successfully checked snapshot status")
+
+ # Check snapshot status using snap name
+ snap_status = get_snap_status_by_snapname(self.mnode,
+ self.snapshots[0])
+ self.assertIsNotNone(snap_status, "failed to get snap status")
+ self.assertEqual(snap_status['name'], "%s" % self.snapshots[0],
+ "Failed to show snapshot "
+ "status for %s" % self.snapshots[0])
+ g.log.info("Successfully checked snapshot status for %s",
+ self.snapshots[0])
+
+ # Check snapshot status using volname
+ ret, snap_vol_status, _ = snap_status_by_volname(self.mnode,
+ self.volname)
+ self.assertEqual(ret, 0, ("Failed to get snapshot statue "
+ "by volume name"))
+ self.assertIsNotNone(snap_vol_status, "failed to get snap status")
+ for snap in self.snapshots:
+ self.assertIn(snap, snap_vol_status,
+ "Failed to validate snapshot status "
+ "using volume name")
+ g.log.info("Successfully validated snapshot status for %s",
+ self.volname)
diff --git a/tests/functional/snapshot/test_snap_uss.py b/tests/functional/snapshot/test_snap_uss.py
index 60e06f359..69887934e 100644
--- a/tests/functional/snapshot/test_snap_uss.py
+++ b/tests/functional/snapshot/test_snap_uss.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -21,7 +21,9 @@ Test Cases in this module tests the
Creation of snapshot and USS feature.
"""
+
from glusto.core import Glusto as g
+
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import (GlusterBaseClass,
runs_on)
@@ -46,7 +48,7 @@ class SnapshotUssSnap(GlusterBaseClass):
def setUp(self):
# SettingUp volume and Mounting the volume
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
g.log.info("Starting to SetUp and Mount Volume")
ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
if not ret:
@@ -56,10 +58,9 @@ class SnapshotUssSnap(GlusterBaseClass):
# Upload io scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", self.clients)
- script_abs_path = "/usr/share/glustolibs/io/scripts/file_dir_ops.py"
self.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(self.clients, script_abs_path)
+ ret = upload_scripts(self.clients, self.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts to clients")
@@ -89,9 +90,10 @@ class SnapshotUssSnap(GlusterBaseClass):
g.log.info("mounts: %s", self.mounts)
all_mounts_procs = []
for mount_obj in self.mounts:
- cmd = ("python %s create_files "
- "-f 10 --base-file-name file %s" % (self.script_upload_path,
- mount_obj.mountpoint))
+ cmd = ("/usr/bin/env python %s create_files "
+ "-f 10 --base-file-name file %s" % (
+ self.script_upload_path,
+ mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
all_mounts_procs.append(proc)
@@ -127,9 +129,10 @@ class SnapshotUssSnap(GlusterBaseClass):
ret = mkdir(mount_obj.client_system, self.mpoint)
self.assertTrue(ret, "Failed to create .snaps directory")
g.log.info("Successfully created .snaps directory")
- cmd = ("python %s create_files "
- "-f 10 --base-file-name foo %s"
- % (self.script_upload_path, self.mpoint))
+ cmd = ("/usr/bin/env python %s create_files "
+ "-f 10 --base-file-name foo %s" % (
+ self.script_upload_path,
+ self.mpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
all_mounts_procs.append(proc)
@@ -226,7 +229,7 @@ class SnapshotUssSnap(GlusterBaseClass):
def tearDown(self):
# Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
# deleting created snapshots
g.log.info("Deleting all snapshots")
diff --git a/tests/functional/snapshot/test_snap_uss_snapd.py b/tests/functional/snapshot/test_snap_uss_snapd.py
new file mode 100644
index 000000000..e008a679a
--- /dev/null
+++ b/tests/functional/snapshot/test_snap_uss_snapd.py
@@ -0,0 +1,377 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+ Description:
+ Test Cases in this module test the USS functionality
+ before and after snapd is killed. Validate snapd after
+ the volume is started with the force option.
+"""
+from os import path
+from time import sleep
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.misc.misc_libs import upload_scripts
+from glustolibs.gluster.brick_libs import get_all_bricks
+from glustolibs.gluster.mount_ops import (mount_volume, is_mounted,
+ unmount_mounts, create_mount_objs)
+from glustolibs.gluster.volume_ops import (volume_start,
+ get_volume_info,
+ volume_stop)
+from glustolibs.gluster.volume_libs import (log_volume_info_and_status,
+ cleanup_volume)
+from glustolibs.gluster.snap_ops import (get_snap_list,
+ snap_create,
+ snap_activate,
+ snap_clone, terminate_snapd_on_node)
+from glustolibs.gluster.uss_ops import (is_snapd_running, is_uss_enabled,
+ enable_uss, disable_uss,
+ uss_list_snaps)
+from glustolibs.io.utils import validate_io_procs, view_snaps_from_mount
+
+
+@runs_on([['replicated', 'distributed-replicated',
+ 'dispersed', 'distributed-dispersed', 'distributed'],
+ ['glusterfs']])
+class SnapshotSnapdCloneVol(GlusterBaseClass):
+
+ @classmethod
+ def setUpClass(cls):
+ cls.get_super_method(cls, 'setUpClass')()
+ cls.mount1 = []
+ cls.mpoint = "/mnt/clone1"
+ cls.server_list = []
+ cls.server_lists = []
+
+ # Upload io scripts for running IO on mounts
+ cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
+ "file_dir_ops.py")
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
+ if not ret:
+ raise ExecutionError("Failed to upload IO scripts "
+ "to clients %s" % cls.clients)
+ g.log.info("Successfully uploaded IO scripts to clients %s",
+ cls.clients)
+
+ def setUp(self):
+
+ self.snap = 'test_snap_clone_snapd-snap'
+ self.clone_vol1 = 'clone-of-test_snap_clone_snapd-clone1'
+ # SettingUp volume and Mounting the volume
+ self.get_super_method(self, 'setUp')()
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to setup volume %s" % self.volname)
+ g.log.info("Volume %s has been setup successfully", self.volname)
+
+ def validate_snapd(self, check_condition=True):
+ """ Validate snapd running """
+ for server in self.server_list:
+ ret = is_snapd_running(server, self.clone_vol1)
+ if check_condition:
+ self.assertTrue(
+ ret, "Unexpected: Snapd is Not running for "
+ "volume %s on node %s" % (self.clone_vol1, server))
+ g.log.info(
+ "Snapd Running for volume %s "
+ "on node: %s", self.clone_vol1, server)
+ else:
+ self.assertFalse(
+ ret, "Unexpected: Snapd is running for"
+ "volume %s on node %s" % (self.clone_vol1, server))
+ g.log.info("Expected: Snapd is not Running for volume"
+ " %s on node: %s", self.clone_vol1, server)
+
+ def check_snaps(self):
+ """ Check snapshots under .snaps folder """
+ ret, _, _ = uss_list_snaps(self.clients[0], self.mpoint)
+ self.assertEqual(ret, 0, "Unexpected: .snaps directory not found")
+ g.log.info("Expected: .snaps directory is present")
+
+ def validate_uss(self):
+ """ Validate USS running """
+ ret = is_uss_enabled(self.mnode, self.clone_vol1)
+ self.assertTrue(ret, "USS is disabled in clone volume "
+ "%s" % self.clone_vol1)
+ g.log.info("USS enabled in cloned Volume %s", self.clone_vol1)
+
+ def validate_snaps(self):
+ """ Validate snapshots under .snaps folder """
+ for _ in range(40):
+ ret = view_snaps_from_mount(self.mount1, self.snaps_list)
+ if ret:
+ break
+ sleep(2)
+ self.assertTrue(ret, "Failed to list .snaps folder")
+ g.log.info("Successfully validated snapshots from .snaps folder")
+
+ def test_snap_clone_snapd(self):
+ """
+ Steps:
+
+ 1. Create a volume
+ 2. Create a snapshot and activate it
+ 3. Clone the snapshot and mount it
+ 4. Check for snapd daemon
+ 5. Enable USS and validate snapd
+ 6. Stop the cloned volume
+ 7. Validate snapd
+ 8. Start the cloned volume
+ 9. Validate snapd
+ 10. Create 5 more snapshots
+ 11. Validate the total number of snapshots created
+ 12. Activate the 5 snapshots
+ 13. Enable USS
+ 14. Validate snapd
+ 15. Kill snapd on all nodes except the management node
+ 16. Validate snapd is not running there
+ 17. Force start the cloned volume
+ 18. Validate snaps inside the .snaps directory
+ """
+ # pylint: disable=too-many-statements, too-many-locals
+
+ # Starting I/O
+ all_mounts_procs = []
+ for mount_obj in self.mounts:
+ cmd = ("/usr/bin/env python %s create_files "
+ "-f 10 --base-file-name file %s" % (
+ self.script_upload_path,
+ mount_obj.mountpoint))
+ proc = g.run_async(mount_obj.client_system, cmd,
+ user=mount_obj.user)
+ all_mounts_procs.append(proc)
+
+ # Validate I/O
+ ret = validate_io_procs(all_mounts_procs, self.mounts)
+ self.assertTrue(ret, "IO failed on some of the clients")
+ g.log.info("IO is successful on all mounts")
+
+ # Creating snapshot
+ ret, _, _ = snap_create(self.mnode, self.volname, self.snap)
+ self.assertEqual(ret, 0, ("Failed to create snapshot for volume %s"
+ % self.volname))
+ g.log.info("Snapshot %s created successfully for "
+ "volume %s", self.snap, self.volname)
+
+ # Activating created snapshots
+ ret, _, _ = snap_activate(self.mnode, self.snap)
+ self.assertEqual(ret, 0, ("Failed to activate snapshot %s"
+ % self.snap))
+ g.log.info("Snapshot snap%s activated successfully", self.snap)
+
+ # Snapshot list
+ self.assertIsNotNone(
+ get_snap_list(self.mnode), "Failed to list snapshot")
+ g.log.info("Snapshot list command Successful")
+
+ # Creating and starting a Clone of snapshot:
+ ret, _, _ = snap_clone(self.mnode, self.snap, self.clone_vol1)
+ self.assertEqual(ret, 0, "Failed to clone %s" % self.clone_vol1)
+ g.log.info("Clone volume %s created successfully", self.clone_vol1)
+
+ # Start the clone volumes
+ ret, _, _ = volume_start(self.mnode, self.clone_vol1)
+ self.assertEqual(ret, 0, "Failed to start %s" % self.clone_vol1)
+ g.log.info("%s started successfully", self.clone_vol1)
+
+ # Form server list
+ brick_list = get_all_bricks(self.mnode, self.clone_vol1)
+ for brick in brick_list:
+ self.server_lists.append(brick.split(":")[0])
+ self.server_list = list(set(self.server_lists))
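+ # e.g. bricks like 'server1:/bricks/brick0/clone1' (hypothetical)
+ # reduce to server_list == ['server1', ...] with duplicates dropped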
+
+ # Get volume info
+ vol_info = get_volume_info(self.mnode, self.clone_vol1)
+ self.assertIsNotNone(vol_info, "Failed to get vol info")
+ g.log.info("Successfully in getting vol info")
+
+ # Redefining mounts for cloned volume
+ self.mount_points, self.mounts_dict_list = [], []
+ for client in self.all_clients_info:
+ mount = {
+ 'protocol': self.mount_type,
+ 'server': self.mnode,
+ 'volname': self.clone_vol1,
+ 'client': self.all_clients_info[client],
+ 'mountpoint': self.mpoint,
+ 'options': ''
+ }
+ self.mounts_dict_list.append(mount)
+ self.mount1 = create_mount_objs(self.mounts_dict_list)
+ self.mount_points.append(self.mpoint)
+ g.log.info("Successfully made entry in self.mount1")
+
+ # FUSE mount clone1 volume
+ for mount_obj in self.mounts:
+ ret, _, _ = mount_volume(self.clone_vol1, self.mount_type,
+ self.mpoint,
+ self.mnode, mount_obj.client_system)
+ self.assertEqual(ret, 0, "Volume mount failed for clone1")
+ g.log.info("%s mounted Successfully", self.clone_vol1)
+
+ # Validate clone volume is mounted or not
+ ret = is_mounted(self.clone_vol1, self.mpoint, self.mnode,
+ mount_obj.client_system, self.mount_type)
+ self.assertTrue(ret, "Volume not mounted on mount point: "
+ "%s" % self.mpoint)
+ g.log.info("Volume %s mounted on %s", self.clone_vol1, self.mpoint)
+
+ # Log Cloned Volume information
+ ret = log_volume_info_and_status(self.mnode, self.clone_vol1)
+ self.assertTrue("Failed to Log Info and Status of Volume "
+ "%s" % self.clone_vol1)
+ g.log.info("Successfully Logged Info and Status")
+
+        # Validate snapd is not running on any node
+ self.validate_snapd(check_condition=False)
+
+ # Enable USS
+ ret, _, _ = enable_uss(self.mnode, self.clone_vol1)
+ self.assertEqual(ret, 0, "Failed to enable USS on cloned volume")
+ g.log.info("Successfully enabled USS on Cloned volume")
+
+ # Validate USS running
+ self.validate_uss()
+
+ # Validate snapd running on all nodes
+ self.validate_snapd()
+
+ # Stop cloned volume
+ ret, _, _ = volume_stop(self.mnode, self.clone_vol1)
+ self.assertEqual(ret, 0, "Failed to stop cloned volume "
+ "%s" % self.clone_vol1)
+ g.log.info("Successfully Stopped Cloned volume %s", self.clone_vol1)
+
+        # Validate snapd is not running on any node
+ self.validate_snapd(check_condition=False)
+
+ # Start cloned volume
+ ret, _, _ = volume_start(self.mnode, self.clone_vol1)
+ self.assertEqual(ret, 0, "Failed to start cloned volume"
+ " %s" % self.clone_vol1)
+ g.log.info("Successfully started cloned volume"
+ " %s", self.clone_vol1)
+
+ # Validate snapd running on all nodes
+ self.validate_snapd()
+
+ # Create 5 snapshots
+        self.snaps_list = ['test_snap_clone_snapd-snap%s' % i
+                           for i in range(5)]
+ for snapname in self.snaps_list:
+ ret, _, _ = snap_create(self.mnode, self.clone_vol1,
+ snapname)
+ self.assertEqual(ret, 0, ("Failed to create snapshot for volume"
+ " %s" % self.clone_vol1))
+ g.log.info("Snapshot %s created successfully for volume "
+ "%s", snapname, self.clone_vol1)
+
+ # Validate USS running
+ self.validate_uss()
+
+ # Check snapshot under .snaps directory
+ self.check_snaps()
+
+ # Activate Snapshots
+ for snapname in self.snaps_list:
+ ret, _, _ = snap_activate(self.mnode, snapname)
+ self.assertEqual(ret, 0, ("Failed to activate snapshot %s"
+ % snapname))
+ g.log.info("Snapshot %s activated "
+ "successfully", snapname)
+
+ # Validate USS running
+ self.validate_uss()
+
+ # Validate snapshots under .snaps folder
+ self.validate_snaps()
+
+        # Kill snapd on all nodes except the management node and validate
+        for server in self.servers[1:]:
+            ret, _, _ = terminate_snapd_on_node(server)
+            self.assertEqual(ret, 0, "Failed to kill snapd on node %s"
+                             % server)
+            g.log.info("snapd killed successfully on node %s", server)
+
+            # Verify snapd is no longer running on the node
+            ret = is_snapd_running(server, self.clone_vol1)
+            self.assertFalse(ret, "Unexpected: snapd running on node: "
+                             "%s" % server)
+            g.log.info("Expected: snapd is not running on node: %s", server)
+
+ # Check snapshots under .snaps folder
+ g.log.info("Validating snapshots under .snaps")
+ ret, _, _ = uss_list_snaps(self.clients[0], self.mpoint)
+ self.assertEqual(ret, 0, "Target endpoint not connected")
+ g.log.info("Successfully listed snapshots under .snaps")
+
+        # Kill snapd on the management node
+ ret, _, _ = terminate_snapd_on_node(self.servers[0])
+ self.assertEqual(ret, 0, "Failed to Kill snapd on node %s"
+ % self.servers[0])
+ g.log.info("snapd Killed Successfully on node %s", self.servers[0])
+
+        # Validate snapd is not running on any node
+ self.validate_snapd(check_condition=False)
+
+ # Validating snapshots under .snaps
+ ret, _, _ = uss_list_snaps(self.clients[0], self.mpoint)
+ self.assertNotEqual(ret, 0, "Unexpected: Successfully listed "
+ "snapshots under .snaps")
+ g.log.info("Expected: Target endpoint not connected")
+
+ # Start the Cloned volume(force start)
+ ret, _, _ = volume_start(self.mnode, self.clone_vol1, force=True)
+ self.assertEqual(ret, 0, "Failed to start cloned volume "
+ "%s" % self.clone_vol1)
+ g.log.info("Successfully Started Cloned volume %s", self.clone_vol1)
+
+ # Validate snapd running on all nodes
+ self.validate_snapd()
+
+ # Validate snapshots under .snaps folder
+ self.validate_snaps()
+
+ def tearDown(self):
+
+ # Disable USS on cloned volume
+ ret, _, _ = disable_uss(self.mnode, self.clone_vol1)
+ if ret:
+ raise ExecutionError("Failed to disable USS on cloned volume")
+ g.log.info("Successfully disabled USS on Cloned volume")
+
+ # Cleanup cloned volume
+ ret = unmount_mounts(self.mount1)
+ if not ret:
+ raise ExecutionError("Failed to unmount cloned volume")
+ ret = cleanup_volume(self.mnode, self.clone_vol1)
+ if not ret:
+ raise ExecutionError("Failed to unmount and cleanup cloned volume")
+ g.log.info("Successfully umounted and cleanup cloned volume")
+
+ # Unmount and cleanup-volume
+ ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to Unmount and Cleanup Volume")
+ g.log.info("Successful in Unmount Volume and Cleanup Volume")
diff --git a/tests/functional/snapshot/test_snap_uss_while_io.py b/tests/functional/snapshot/test_snap_uss_while_io.py
index e8435c579..d11c9663c 100644
--- a/tests/functional/snapshot/test_snap_uss_while_io.py
+++ b/tests/functional/snapshot/test_snap_uss_while_io.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -21,7 +21,9 @@ Test Cases in this module tests the
uss functionality while io is going on.
"""
+
from glusto.core import Glusto as g
+
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass
from glustolibs.gluster.gluster_base_class import runs_on
@@ -30,7 +32,8 @@ from glustolibs.io.utils import (validate_io_procs,
from glustolibs.gluster.snap_ops import (snap_create,
snap_activate,
snap_list)
-from glustolibs.gluster.uss_ops import (enable_uss, is_uss_enabled,
+from glustolibs.gluster.uss_ops import (disable_uss,
+ enable_uss, is_uss_enabled,
is_snapd_running)
from glustolibs.misc.misc_libs import upload_scripts
@@ -43,15 +46,13 @@ class SnapshotUssWhileIo(GlusterBaseClass):
@classmethod
def setUpClass(cls):
cls.snap_count = 10
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# Upload io scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts "
"to clients ")
@@ -60,7 +61,7 @@ class SnapshotUssWhileIo(GlusterBaseClass):
def setUp(self):
# SettingUp volume and Mounting the volume
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
g.log.info("Starting to SetUp Volume and mount volume")
ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
if not ret:
@@ -69,6 +70,18 @@ class SnapshotUssWhileIo(GlusterBaseClass):
def tearDown(self):
+        # Disable USS if it is still enabled on the volume
+        g.log.info("Validating whether USS is enabled or disabled")
+        ret = is_uss_enabled(self.mnode, self.volname)
+        if ret:
+            # Disable USS
+            ret, _, _ = disable_uss(self.mnode, self.volname)
+            if ret:
+                raise ExecutionError("Failed to disable USS on volume "
+                                     "%s" % self.volname)
+            g.log.info("Successfully disabled USS on volume %s",
+                       self.volname)
+
# Unmount and cleanup original volume
g.log.info("Starting to Unmount Volume and Cleanup Volume")
ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
@@ -76,6 +89,9 @@ class SnapshotUssWhileIo(GlusterBaseClass):
raise ExecutionError("Failed to umount the vol & cleanup Volume")
g.log.info("Successful in umounting the volume and Cleanup")
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
def test_snap_uss_while_io(self):
# pylint: disable=too-many-statements
"""
@@ -124,8 +140,10 @@ class SnapshotUssWhileIo(GlusterBaseClass):
"%s", mount_obj.client_system, mount_obj.mountpoint)
# Create files
g.log.info('Creating files...')
- command = ("python %s create_files -f 100 --fixed-file-size 1M %s"
- % (self.script_upload_path, mount_obj.mountpoint))
+ command = ("/usr/bin/env python %s create_files -f 100 "
+ "--fixed-file-size 1M %s" % (
+ self.script_upload_path,
+ mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, command,
user=mount_obj.user)
all_mounts_procs.append(proc)
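
The asynchronous IO pattern shown in this hunk recurs across the whole
patch; condensed into a sketch (g.run_async and validate_io_procs are the
helpers already used here; the run_io wrapper itself is illustrative):

    from glusto.core import Glusto as g
    from glustolibs.io.utils import validate_io_procs

    def run_io(mounts, script_path):
        """Start file creation on every mount, then validate the runs."""
        procs = []
        for mount in mounts:
            cmd = ("/usr/bin/env python %s create_files -f 100 "
                   "--fixed-file-size 1M %s"
                   % (script_path, mount.mountpoint))
            procs.append(g.run_async(mount.client_system, cmd,
                                     user=mount.user))
        return validate_io_procs(procs, mounts)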
diff --git a/tests/functional/snapshot/test_snapshot_create.py b/tests/functional/snapshot/test_snapshot_create.py
index e1bc9c272..677199f21 100644
--- a/tests/functional/snapshot/test_snapshot_create.py
+++ b/tests/functional/snapshot/test_snapshot_create.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -19,7 +19,9 @@ Description : The purpose of this test is to validate snapshot create
"""
+
from glusto.core import Glusto as g
+
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.misc.misc_libs import upload_scripts
@@ -56,16 +58,14 @@ class SnapCreate(GlusterBaseClass):
"""
@classmethod
def setUpClass(cls):
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# Upload io scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts to clients %s" %
cls.clients)
@@ -77,7 +77,7 @@ class SnapCreate(GlusterBaseClass):
setUp method
"""
# Setup_Volume
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
if not ret:
raise ExecutionError("Failed to setup and mount volume")
@@ -90,20 +90,14 @@ class SnapCreate(GlusterBaseClass):
ret, _, _ = snap_delete_all(self.mnode)
if ret != 0:
raise ExecutionError("Failed to delete all snaps")
- GlusterBaseClass.tearDown.im_func(self)
- @classmethod
- def tearDownClass(cls):
- """
- Clean up the volume & mount
- """
- g.log.info("Starting volume and mount cleanup")
- ret = cls.unmount_volume_and_cleanup_volume(cls.mounts)
+ # Unmount and cleanup original volume
+ g.log.info("Starting to Unmount Volume and Cleanup Volume")
+ ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
if not ret:
- raise ExecutionError("Failed to cleanup volume and mount")
- g.log.info("Cleanup successful for the volume and mount")
-
- GlusterBaseClass.tearDownClass.im_func(cls)
+ raise ExecutionError("Failed to umount the vol & cleanup Volume")
+ g.log.info("Successful in umounting the volume and Cleanup")
+ self.get_super_method(self, 'tearDown')()
def test_validate_snaps_create(self):
"""
@@ -158,13 +152,14 @@ class SnapCreate(GlusterBaseClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 2 "
"--dir-length 10 "
"--max-num-of-dirs 5 "
- "--num-of-files 5 %s" % (self.script_upload_path, count,
- mount_obj.mountpoint))
+ "--num-of-files 5 %s" % (
+ self.script_upload_path, count,
+ mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
all_mounts_procs.append(proc)
diff --git a/tests/functional/snapshot/test_snapshot_restore.py b/tests/functional/snapshot/test_snapshot_restore.py
index 966551b58..99a82e2b7 100644
--- a/tests/functional/snapshot/test_snapshot_restore.py
+++ b/tests/functional/snapshot/test_snapshot_restore.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2016-2017 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2016-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -18,7 +18,9 @@
The purpose of this test is to validate restore of a snapshot.
"""
+
from glusto.core import Glusto as g
+
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.misc.misc_libs import upload_scripts
@@ -60,16 +62,14 @@ class SnapRestore(GlusterBaseClass):
"""
@classmethod
def setUpClass(cls):
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# Upload io scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts to clients %s" %
cls.clients)
@@ -81,7 +81,7 @@ class SnapRestore(GlusterBaseClass):
setUp method
"""
# Setup_Volume
- GlusterBaseClass.setUpClass.im_func(self)
+ self.get_super_method(self, 'setUp')()
ret = self.setup_volume_and_mount_volume(mounts=self.mounts,
volume_create_force=True)
if not ret:
@@ -95,20 +95,14 @@ class SnapRestore(GlusterBaseClass):
ret, _, _ = snap_delete_all(self.mnode)
if not ret:
raise ExecutionError("Snapshot delete failed.")
- GlusterBaseClass.tearDown.im_func(self)
- @classmethod
- def tearDownClass(cls):
- """
- Clean up the volume & mount
- """
- g.log.info("Starting volume and mount cleanup")
- ret = cls.unmount_volume_and_cleanup_volume(cls.mounts)
+ # Unmount and cleanup-volume
+ ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
if not ret:
- raise ExecutionError("Failed to cleanup volume and mount")
- g.log.info("Cleanup successful for the volume and mount")
+ raise ExecutionError("Failed to Unmount and Cleanup Volume")
+ g.log.info("Successful in Unmount Volume and Cleanup Volume")
- GlusterBaseClass.tearDownClass.im_func(cls)
+ self.get_super_method(self, 'tearDown')()
def test_validate_snaps_restore(self):
# pylint: disable=too-many-statements
@@ -118,13 +112,14 @@ class SnapRestore(GlusterBaseClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 2 "
"--dir-length 10 "
"--max-num-of-dirs 5 "
- "--num-of-files 5 %s" % (self.script_upload_path, count,
- mount_obj.mountpoint))
+ "--num-of-files 5 %s" % (
+ self.script_upload_path, count,
+ mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
all_mounts_procs.append(proc)
@@ -175,13 +170,14 @@ class SnapRestore(GlusterBaseClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 2 "
"--dir-length 10 "
"--max-num-of-dirs 5 "
- "--num-of-files 5 %s" % (self.script_upload_path, count,
- mount_obj.mountpoint))
+ "--num-of-files 5 %s" % (
+ self.script_upload_path, count,
+ mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
all_mounts_procs.append(proc)
@@ -254,13 +250,14 @@ class SnapRestore(GlusterBaseClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 2 "
"--dir-length 10 "
"--max-num-of-dirs 5 "
- "--num-of-files 5 %s" % (self.script_upload_path, count,
- mount_obj.mountpoint))
+ "--num-of-files 5 %s" % (
+ self.script_upload_path, count,
+ mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
all_mounts_procs.append(proc)
diff --git a/tests/functional/snapshot/test_uss_brick_down.py b/tests/functional/snapshot/test_uss_brick_down.py
index 172b0f291..fbd9644ed 100644
--- a/tests/functional/snapshot/test_uss_brick_down.py
+++ b/tests/functional/snapshot/test_uss_brick_down.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -20,7 +20,9 @@ Description:
enable USS on the volume when brick is down.
"""
+
from glusto.core import Glusto as g
+
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass
from glustolibs.gluster.gluster_base_class import runs_on
@@ -41,15 +43,13 @@ class SnapUssBrickDown(GlusterBaseClass):
@classmethod
def setUpClass(cls):
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# Upload io scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts "
"to clients ")
@@ -58,7 +58,7 @@ class SnapUssBrickDown(GlusterBaseClass):
def setUp(self):
# SettingUp and Mounting the volume
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
g.log.info("Starting to SetUp Volume and mount volume")
ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
if not ret:
@@ -115,14 +115,14 @@ class SnapUssBrickDown(GlusterBaseClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 2 "
"--dir-length 2 "
"--max-num-of-dirs 2 "
- "--num-of-files 2 %s" % (self.script_upload_path,
- self.counter,
- mount_obj.mountpoint))
+ "--num-of-files 2 %s" % (
+ self.script_upload_path,
+ self.counter, mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
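
For context, the brick-down step this test relies on is typically built
from the glustolibs brick helpers; a minimal sketch (helper names from
glustolibs.gluster.brick_libs; the wrapper itself is illustrative):

    from glustolibs.gluster.brick_libs import (get_all_bricks,
                                               bring_bricks_offline,
                                               are_bricks_offline)

    def bring_one_brick_down(mnode, volname):
        """Take the first brick of volname offline and confirm it."""
        brick = get_all_bricks(mnode, volname)[0]
        assert bring_bricks_offline(volname, [brick]), "offline op failed"
        return are_bricks_offline(mnode, volname, [brick])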
diff --git a/tests/functional/snapshot/test_uss_snap_active_deactive.py b/tests/functional/snapshot/test_uss_snap_active_deactive.py
index 93c0231ee..fca78f43d 100644
--- a/tests/functional/snapshot/test_uss_snap_active_deactive.py
+++ b/tests/functional/snapshot/test_uss_snap_active_deactive.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -21,7 +21,9 @@ Description:
snap should not.
"""
+
from glusto.core import Glusto as g
+
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.io.utils import (validate_io_procs,
@@ -42,15 +44,13 @@ class SnapUssActiveD(GlusterBaseClass):
@classmethod
def setUpClass(cls):
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# Upload io scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts "
"to clients ")
@@ -59,7 +59,7 @@ class SnapUssActiveD(GlusterBaseClass):
def setUp(self):
# SettingUp and Mounting the volume
- GlusterBaseClass.setUpClass.im_func(self)
+ self.get_super_method(self, 'setUp')()
g.log.info("Starting to SetUp Volume and mount volume")
ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
if not ret:
@@ -121,14 +121,14 @@ class SnapUssActiveD(GlusterBaseClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 2 "
"--dir-length 2 "
"--max-num-of-dirs 2 "
- "--num-of-files 2 %s" % (self.script_upload_path,
- self.counter,
- mount_obj.mountpoint))
+ "--num-of-files 2 %s" % (
+ self.script_upload_path,
+ self.counter, mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
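
Snapshot activation state controls .snaps visibility, which is what this
test checks; a condensed sketch (snap_activate/snap_deactivate and
get_uss_list_snaps are glustolibs helpers; the wrapper is illustrative):

    from glustolibs.gluster.snap_ops import snap_activate, snap_deactivate
    from glustolibs.gluster.uss_ops import get_uss_list_snaps

    def snap_visible(mnode, client, mountpoint, snapname, activate=True):
        """Toggle activation, then report if snapname shows under .snaps."""
        op = snap_activate if activate else snap_deactivate
        ret, _, _ = op(mnode, snapname)
        assert ret == 0, "failed to toggle activation of %s" % snapname
        return snapname in (get_uss_list_snaps(client, mountpoint) or [])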
diff --git a/tests/functional/snapshot/test_uss_snap_restore.py b/tests/functional/snapshot/test_uss_snap_restore.py
new file mode 100644
index 000000000..45de07c93
--- /dev/null
+++ b/tests/functional/snapshot/test_uss_snap_restore.py
@@ -0,0 +1,239 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.io.utils import (
+ wait_for_io_to_complete,
+ get_mounts_stat)
+from glustolibs.gluster.snap_ops import (
+ snap_create,
+ get_snap_list,
+ snap_activate,
+ snap_restore_complete)
+from glustolibs.gluster.uss_ops import (
+ enable_uss,
+ is_uss_enabled,
+ get_uss_list_snaps,
+ is_snapd_running,
+ disable_uss)
+from glustolibs.misc.misc_libs import upload_scripts
+from glustolibs.gluster.volume_libs import (
+ verify_all_process_of_volume_are_online)
+
+
+@runs_on([['replicated', 'distributed-replicated', 'dispersed',
+ 'distributed', 'distributed-dispersed'],
+ ['glusterfs', 'nfs']])
+class TestUssSnapRestore(GlusterBaseClass):
+
+ @classmethod
+ def setUpClass(cls):
+ # Calling GlusterBaseClass setUpClass
+ cls.get_super_method(cls, 'setUpClass')()
+
+ # Upload IO scripts for running IO on mounts
+ cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
+ "file_dir_ops.py")
+ ret = upload_scripts(cls.clients, [cls.script_upload_path])
+ if not ret:
+ raise ExecutionError("Failed to upload IO scripts to clients %s"
+ % cls.clients)
+ g.log.info("Successfully uploaded IO scripts to clients %s",
+ cls.clients)
+
+ def setUp(self):
+ # Calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+
+ self.all_mounts_procs = []
+
+ # Setup Volume and Mount Volume
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
+ g.log.info("Successful in Setup Volume and Mount Volume")
+        self.snapshots = ['snap-test-uss-snap-restore-%s-%s'
+                          % (self.volname, i) for i in range(2)]
+
+ def tearDown(self):
+
+ # Disable uss for volume
+ ret, _, _ = disable_uss(self.mnode, self.volname)
+ if ret:
+ raise ExecutionError("Failed to disable uss")
+ g.log.info("Successfully disabled uss for volume %s", self.volname)
+
+ # Unmount and cleanup original volume
+ ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to umount and cleanup Volume")
+ g.log.info("Successful in umounting the volume and Cleanup")
+
+ # Calling GlusterBaseClass teardown
+ self.get_super_method(self, 'tearDown')()
+
+ def test_uss_snap_restore(self):
+ """
+ Description:
+ This test case will validate USS after Snapshot restore.
+ The restored snapshot should not be listed under the '.snaps'
+ directory.
+
+ * Perform I/O on mounts
+ * Enable USS on volume
+ * Validate USS is enabled
+ * Create a snapshot
+ * Activate the snapshot
+ * Perform some more I/O
+ * Create another snapshot
+        * Activate the second snapshot
+        * Restore the volume to the second snapshot
+        * From the mount point, validate the .snaps directory:
+          - the first snapshot should be listed
+          - the second snapshot should not be listed
+ """
+
+ # pylint: disable=too-many-statements
+ # Perform I/O
+ cmd = (
+ "/usr/bin/env python %s create_files "
+ "-f 10 --base-file-name firstfiles %s"
+ % (self.script_upload_path,
+ self.mounts[0].mountpoint))
+ proc = g.run_async(
+ self.mounts[0].client_system, cmd, user=self.mounts[0].user)
+ self.all_mounts_procs.append(proc)
+
+ # Wait for IO to complete and validate IO
+ self.assertTrue(
+ wait_for_io_to_complete(self.all_mounts_procs, self.mounts[0]),
+ "IO failed on %s" % self.mounts[0])
+ g.log.info("IO is successful on all mounts")
+
+ # Get stat of all the files/dirs created.
+ ret = get_mounts_stat(self.mounts)
+ self.assertTrue(ret, "Stat failed on some of the clients")
+ g.log.info("Successfully got stat of all files/dirs created")
+
+ # Enable USS
+ ret, _, _ = enable_uss(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "Failed to enable USS on volume")
+ g.log.info("Successfully enabled USS on volume")
+
+ # Validate USS is enabled
+ ret = is_uss_enabled(self.mnode, self.volname)
+ self.assertTrue(ret, "USS is disabled on volume %s" % self.volname)
+ g.log.info("USS enabled on volume %s", self.volname)
+
+ # Create a snapshot
+ ret, _, _ = snap_create(self.mnode, self.volname, self.snapshots[0])
+ self.assertEqual(ret, 0, ("Failed to create snapshot for %s"
+ % self.volname))
+ g.log.info("Snapshot %s created successfully for volume %s",
+ self.snapshots[0], self.volname)
+
+        # Check the number of snapshots using snap_list; it should be 1 now
+        snap_list = get_snap_list(self.mnode)
+        self.assertEqual(1, len(snap_list), "Number of snapshots not "
+                         "consistent for volume %s" % self.volname)
+ g.log.info("Successfully validated number of snapshots")
+
+ # Activate the snapshot
+ ret, _, _ = snap_activate(self.mnode, self.snapshots[0])
+ self.assertEqual(ret, 0, ("Failed to activate snapshot %s"
+ % self.snapshots[0]))
+ g.log.info("Snapshot %s activated successfully", self.snapshots[0])
+
+ # Perform I/O
+ self.all_mounts_procs = []
+ cmd = (
+ "/usr/bin/env python %s create_files "
+ "-f 10 --base-file-name secondfiles %s"
+ % (self.script_upload_path,
+ self.mounts[0].mountpoint))
+ proc = g.run_async(
+ self.mounts[0].client_system, cmd, user=self.mounts[0].user)
+ self.all_mounts_procs.append(proc)
+
+ # Wait for IO to complete and validate IO
+ self.assertTrue(
+ wait_for_io_to_complete(self.all_mounts_procs, self.mounts[0]),
+ "IO failed on %s" % self.mounts[0])
+ g.log.info("IO is successful on all mounts")
+
+ # Get stat of all the files/dirs created.
+ ret = get_mounts_stat(self.mounts)
+ self.assertTrue(ret, "Stat failed on some of the clients")
+ g.log.info("Successfully got stat of all files/dirs created")
+
+ # Create another snapshot
+ ret, _, _ = snap_create(self.mnode, self.volname, self.snapshots[1])
+ self.assertEqual(ret, 0, ("Failed to create snapshot for volume %s"
+ % self.volname))
+ g.log.info("Snapshot %s created successfully for volume %s",
+ self.snapshots[1], self.volname)
+
+        # Check the number of snapshots using snap_list; it should be 2 now
+        snap_list = get_snap_list(self.mnode)
+        self.assertEqual(2, len(snap_list), "Number of snapshots not "
+                         "consistent for volume %s" % self.volname)
+ g.log.info("Successfully validated number of snapshots")
+
+ # Activate the second snapshot
+ ret, _, _ = snap_activate(self.mnode, self.snapshots[1])
+ self.assertEqual(ret, 0, ("Failed to activate snapshot %s"
+ % self.snapshots[1]))
+ g.log.info("Snapshot %s activated successfully", self.snapshots[1])
+
+ # Restore volume to the second snapshot
+ ret = snap_restore_complete(
+ self.mnode, self.volname, self.snapshots[1])
+ self.assertTrue(ret, ("Failed to restore snap %s on the "
+ "volume %s" % (self.snapshots[1], self.volname)))
+ g.log.info("Restore of volume is successful from %s on "
+ "volume %s", self.snapshots[1], self.volname)
+
+ # Verify all volume processes are online
+ ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
+ self.assertTrue(ret, "Failed: All volume processes are not online")
+ g.log.info("All volume processes are online")
+ ret = is_snapd_running(self.mnode, self.volname)
+ self.assertTrue(
+ ret, "Failed: snapd is not running for volume %s" % self.volname)
+ g.log.info("Successful: snapd is running")
+
+ # List activated snapshots under the .snaps directory
+ snap_dir_list = get_uss_list_snaps(self.mounts[0].client_system,
+ self.mounts[0].mountpoint)
+ self.assertIsNotNone(
+ snap_dir_list, "Failed to list snapshots under .snaps directory")
+ g.log.info("Successfully gathered list of snapshots under the .snaps"
+ " directory")
+
+ # Check for first snapshot as it should get listed here
+ self.assertIn(self.snapshots[0], snap_dir_list,
+ ("Unexpected : %s not listed under .snaps "
+ "directory" % self.snapshots[0]))
+ g.log.info("Activated Snapshot %s listed Successfully",
+ self.snapshots[0])
+
+ # Check for second snapshot as it should not get listed here
+ self.assertNotIn(self.snapshots[1], snap_dir_list,
+ ("Unexpected : %s listed in .snaps "
+ "directory" % self.snapshots[1]))
+ g.log.info("Restored Snapshot %s not listed ", self.snapshots[1])
diff --git a/tests/functional/snapshot/test_validate_snap_del_gd_down.py b/tests/functional/snapshot/test_validate_snap_del_gd_down.py
index fa14b1e1f..45229dbff 100644
--- a/tests/functional/snapshot/test_validate_snap_del_gd_down.py
+++ b/tests/functional/snapshot/test_validate_snap_del_gd_down.py
@@ -39,7 +39,7 @@ class SnapDelWhenGDDown(GlusterBaseClass):
def setUp(self):
# Setting and Mounting the volume
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
g.log.info("Starting to Set and Mount Volume")
ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
if not ret:
diff --git a/tests/functional/snapshot/test_validate_snap_scheduler.py b/tests/functional/snapshot/test_validate_snap_scheduler.py
index c8513ca22..3f1cedccc 100644
--- a/tests/functional/snapshot/test_validate_snap_scheduler.py
+++ b/tests/functional/snapshot/test_validate_snap_scheduler.py
@@ -44,7 +44,7 @@ class SnapshotSchedulerBehaviour(GlusterBaseClass):
def setUp(self):
# SettingUp volume and Mounting the volume
- GlusterBaseClass.setUpClass.im_func(self)
+ self.get_super_method(self, 'setUp')()
g.log.info("Starting to SetUp and Mount Volume")
ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
if not ret:
diff --git a/tests/functional/snapshot/test_validate_snaps_dir_over_uss.py b/tests/functional/snapshot/test_validate_snaps_dir_over_uss.py
index 8fcce0096..c1e42517f 100644
--- a/tests/functional/snapshot/test_validate_snaps_dir_over_uss.py
+++ b/tests/functional/snapshot/test_validate_snaps_dir_over_uss.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -23,7 +23,9 @@
"""
+
from glusto.core import Glusto as g
+
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.misc.misc_libs import upload_scripts
@@ -48,7 +50,7 @@ class TestValidateUss(GlusterBaseClass):
setup volume and initialize necessary variables
"""
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# Setup volume and mount
g.log.info("Starting to Setup Volume and Mount Volume")
ret = cls.setup_volume_and_mount_volume(mounts=cls.mounts)
@@ -59,11 +61,9 @@ class TestValidateUss(GlusterBaseClass):
# Upload io scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts "
"to clients %s" % cls.clients)
@@ -89,23 +89,14 @@ class TestValidateUss(GlusterBaseClass):
g.log.info("Successfully disabled uss for volume"
"%s", self.volname)
- # Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
-
- @classmethod
- def tearDownClass(cls):
- """
- Clean up the volume & mount
- """
- # stopping the volume and clean up the volume
- g.log.info("Starting to Cleanup Volume")
- ret = cls.unmount_volume_and_cleanup_volume(cls.mounts)
+ # Unmount and cleanup-volume
+ ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
if not ret:
- raise ExecutionError("Failed to Cleanup Volume and mount")
- g.log.info("Successful in Cleanup Volume and mount")
+ raise ExecutionError("Failed to Unmount and Cleanup Volume")
+ g.log.info("Successful in Unmount Volume and Cleanup Volume")
- # calling GlusterBaseClass tearDownClass
- GlusterBaseClass.tearDownClass.im_func(cls)
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
def test_validate_snaps_dir_over_uss(self):
@@ -124,14 +115,14 @@ class TestValidateUss(GlusterBaseClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 2 "
"--dir-length 2 "
"--max-num-of-dirs 2 "
- "--num-of-files 2 %s" % (self.script_upload_path,
- self.counter,
- mount_obj.mountpoint))
+ "--num-of-files 2 %s" % (
+ self.script_upload_path,
+ self.counter, mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
@@ -203,9 +194,10 @@ class TestValidateUss(GlusterBaseClass):
g.log.info("Starting IO on all mounts...")
all_mounts_procs = []
for mount_obj in self.mounts:
- cmd = ("python %s create_files "
- "-f 10 --base-file-name file %s/.snaps/abc/"
- % (self.script_upload_path, mount_obj.mountpoint))
+ cmd = ("/usr/bin/env python %s create_files "
+ "-f 10 --base-file-name file %s/.snaps/abc/" % (
+ self.script_upload_path,
+ mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
all_mounts_procs.append(proc)
diff --git a/tests/functional/snapshot/test_validate_snapshot_max_limit.py b/tests/functional/snapshot/test_validate_snapshot_max_limit.py
index 3df2a98dc..7832ac5d4 100644
--- a/tests/functional/snapshot/test_validate_snapshot_max_limit.py
+++ b/tests/functional/snapshot/test_validate_snapshot_max_limit.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -43,7 +43,9 @@ Steps :
"""
+
from glusto.core import Glusto as g
+
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.misc.misc_libs import upload_scripts
@@ -83,16 +85,14 @@ class SnapCreateMax(GlusterBaseClass):
"""
@classmethod
def setUpClass(cls):
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# Upload io scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts "
"to clients %s" % cls.clients)
@@ -104,7 +104,7 @@ class SnapCreateMax(GlusterBaseClass):
setUp method
"""
# Setup_Volume
- GlusterBaseClass.setUpClass.im_func(self)
+ self.get_super_method(self, 'setUp')()
ret = self.setup_volume_and_mount_volume(mounts=self.mounts,
volume_create_force=True)
if not ret:
@@ -118,7 +118,7 @@ class SnapCreateMax(GlusterBaseClass):
ret, _, _ = snap_delete_all(self.mnode)
if ret != 0:
raise ExecutionError("Failed to delete all snapshots.")
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
# Clean up the volume & mount
g.log.info("Starting volume and mount cleanup")
@@ -135,13 +135,14 @@ class SnapCreateMax(GlusterBaseClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 2 "
"--dir-length 10 "
"--max-num-of-dirs 5 "
- "--num-of-files 5 %s" % (self.script_upload_path, count,
- mount_obj.mountpoint))
+ "--num-of-files 5 %s" % (
+ self.script_upload_path, count,
+ mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
all_mounts_procs.append(proc)
diff --git a/tests/functional/snapshot/test_validate_snapshot_rebalance.py b/tests/functional/snapshot/test_validate_snapshot_rebalance.py
index f9e018d64..a03064eca 100644
--- a/tests/functional/snapshot/test_validate_snapshot_rebalance.py
+++ b/tests/functional/snapshot/test_validate_snapshot_rebalance.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -20,7 +20,9 @@ Description : The purpose of this test is to validate snapshot create
"""
+
from glusto.core import Glusto as g
+
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.misc.misc_libs import upload_scripts
@@ -61,16 +63,14 @@ class SnapCreateRebal(GlusterBaseClass):
"""
@classmethod
def setUpClass(cls):
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# Upload io scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts "
"to clients %s" % cls.clients)
@@ -82,7 +82,7 @@ class SnapCreateRebal(GlusterBaseClass):
setUp method
"""
# Setup_Volume
- GlusterBaseClass.setUpClass.im_func(self)
+ self.get_super_method(self, 'setUp')()
ret = self.setup_volume_and_mount_volume(mounts=self.mounts,
volume_create_force=True)
if not ret:
@@ -96,7 +96,7 @@ class SnapCreateRebal(GlusterBaseClass):
ret, _, _ = snap_delete_all(self.mnode)
if not ret:
raise ExecutionError("Failed to delete all snaps")
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
# Clean up the volume & mount
g.log.info("Starting volume and mount cleanup")
@@ -113,13 +113,14 @@ class SnapCreateRebal(GlusterBaseClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 2 "
"--dir-length 10 "
"--max-num-of-dirs 5 "
- "--num-of-files 5 %s" % (self.script_upload_path, count,
- mount_obj.mountpoint))
+ "--num-of-files 5 %s" % (
+ self.script_upload_path, count,
+ mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
all_mounts_procs.append(proc)