Diffstat (limited to 'tests/functional/snapshot')
27 files changed, 917 insertions(+), 352 deletions(-)
diff --git a/tests/functional/snapshot/test_256_snapshots.py b/tests/functional/snapshot/test_256_snapshots.py index 75df2ed4f..77aaae591 100644 --- a/tests/functional/snapshot/test_256_snapshots.py +++ b/tests/functional/snapshot/test_256_snapshots.py @@ -14,7 +14,6 @@ # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -import sys from time import sleep from glusto.core import Glusto as g from glustolibs.gluster.exceptions import ExecutionError @@ -99,10 +98,9 @@ class TestValidateSnaps256(GlusterBaseClass): # pylint: disable=too-many-statements # Start IO on all mounts cmd = ( - "/usr/bin/env python%d %s create_files " + "/usr/bin/env python %s create_files " "-f 10 --base-file-name firstfiles %s" - % (sys.version_info.major, - self.script_upload_path, + % (self.script_upload_path, self.mounts[0].mountpoint)) proc = g.run_async( self.mounts[0].client_system, cmd, user=self.mounts[0].user) diff --git a/tests/functional/snapshot/test_activate_deactivate.py b/tests/functional/snapshot/test_activate_deactivate.py index d75931307..e3b46bb9c 100644 --- a/tests/functional/snapshot/test_activate_deactivate.py +++ b/tests/functional/snapshot/test_activate_deactivate.py @@ -1,4 +1,4 @@ -# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com> +# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -58,23 +58,15 @@ class TestActivateDeactivate(GlusterBaseClass): if ret != 0: raise ExecutionError("Snapshot Delete Failed") g.log.info("Successfully deleted all snapshots") - # Calling GlusterBaseClass tearDown - self.get_super_method(self, 'tearDown')() - @classmethod - def tearDownClass(cls): - """ - Clean up the volume & mount - """ - # stopping the volume and clean up the volume - g.log.info("Starting to Cleanup Volume") - ret = cls.cleanup_volume() + # Cleanup-volume + ret = self.cleanup_volume() if not ret: - raise ExecutionError("Failed to Cleanup Volume and mount") - g.log.info("Successful in Cleanup Volume and mount") + raise ExecutionError("Failed to Cleanup Volume") + g.log.info("Successful in Cleanup Volume") - # calling GlusterBaseClass tearDownClass - cls.get_super_method(cls, 'tearDownClass')() + # Calling GlusterBaseClass tearDown + self.get_super_method(self, 'tearDown')() def test_activate_deactivate(self): # pylint: disable=too-many-branches, too-many-statements diff --git a/tests/functional/snapshot/test_activate_on_create.py b/tests/functional/snapshot/test_activate_on_create.py index 82d8401af..939641b9c 100644 --- a/tests/functional/snapshot/test_activate_on_create.py +++ b/tests/functional/snapshot/test_activate_on_create.py @@ -1,4 +1,4 @@ -# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com> +# Copyright (C) 2017-2020 Red Hat, Inc. 
<http://www.redhat.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -67,23 +67,14 @@ class TestActivateOnCreate(GlusterBaseClass): g.log.info("set_snap_config Success to disable " "activate-on-create") - # Calling GlusterBaseClass tearDown - self.get_super_method(self, 'tearDown')() - - @classmethod - def tearDownClass(cls): - """ - Clean up the volume & mount - """ - # stopping the volume and clean up the volume - g.log.info("Starting to Cleanup Volume") - ret = cls.cleanup_volume() + # Cleanup-volume + ret = self.cleanup_volume() if not ret: - raise ExecutionError("Failed to Cleanup Volume and mount") - g.log.info("Successful in Cleanup Volume and mount") + raise ExecutionError("Failed to Cleanup Volume") + g.log.info("Successful in Cleanup Volume") - # calling GlusterBaseClass tearDownClass - cls.get_super_method(cls, 'tearDownClass')() + # Calling GlusterBaseClass tearDown + self.get_super_method(self, 'tearDown')() def test_activate_on_create(self): # pylint: disable=too-many-branches, too-many-statements diff --git a/tests/functional/snapshot/test_auto_delete.py b/tests/functional/snapshot/test_auto_delete.py index 41aa6dc64..d1e934c02 100644 --- a/tests/functional/snapshot/test_auto_delete.py +++ b/tests/functional/snapshot/test_auto_delete.py @@ -85,13 +85,8 @@ class TestSnapAutoDelete(GlusterBaseClass): "auto-delete")
g.log.info("Successfully set the snapshot config options to default")
- @classmethod
- def tearDownClass(cls):
- # calling GlusterBaseClass tearDownClass
- cls.get_super_method(cls, 'tearDownClass')()
-
- # Clean up the volume
- ret = cls.cleanup_volume()
+ # Cleanup-volume
+ ret = self.cleanup_volume()
if not ret:
raise ExecutionError("Failed to Cleanup Volume")
g.log.info("Successful in Cleanup Volume")
diff --git a/tests/functional/snapshot/test_mount_snap.py b/tests/functional/snapshot/test_mount_snap.py index 914f6a548..ef918ba8b 100644 --- a/tests/functional/snapshot/test_mount_snap.py +++ b/tests/functional/snapshot/test_mount_snap.py @@ -1,4 +1,4 @@ -# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com> +# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -22,7 +22,6 @@ """ import os -import sys from glusto.core import Glusto as g @@ -85,14 +84,17 @@ class TestSnapMountSnapshot(GlusterBaseClass): g.log.info("Starting IO on all mounts...") g.log.info("mounts: %s", self.mounts) all_mounts_procs = [] + self.counter = 1 for mount_obj in self.mounts: - cmd = ("/usr/bin/env python%d %s create_files " - "-f 10 --base-file-name file %s" % ( - sys.version_info.major, self.script_upload_path, + cmd = ("/usr/bin/env python %s create_files " + "-f 10 --base-file-name file%d %s" % ( + self.script_upload_path, + self.counter, mount_obj.mountpoint)) proc = g.run_async(mount_obj.client_system, cmd, user=mount_obj.user) all_mounts_procs.append(proc) + self.counter += 100 # Validate I/O self.assertTrue( @@ -151,9 +153,9 @@ class TestSnapMountSnapshot(GlusterBaseClass): g.log.info("Starting IO on all mounts...") all_mounts_procs = [] for mount_obj in self.mounts: - cmd = ("/usr/bin/env python%d %s create_files " + cmd = ("/usr/bin/env python %s create_files " "-f 10 --base-file-name file %s" % ( - sys.version_info.major, self.script_upload_path, + self.script_upload_path, mount_obj.mountpoint)) proc = g.run_async(mount_obj.client_system, cmd, user=mount_obj.user) @@ -170,9 +172,9 @@ class TestSnapMountSnapshot(GlusterBaseClass): g.log.info("mounts: %s", self.mount1) all_mounts_procs = [] for mount_obj in self.mount1: - cmd = ("/usr/bin/env python%d %s create_files " + cmd = ("/usr/bin/env python %s create_files " "-f 10 --base-file-name file %s" % ( - sys.version_info.major, self.script_upload_path, + self.script_upload_path, mount_obj.mountpoint)) proc = g.run_async(mount_obj.client_system, cmd, user=mount_obj.user) diff --git a/tests/functional/snapshot/test_restore_online_vol.py b/tests/functional/snapshot/test_restore_online_vol.py index 6b188acba..2fa46012b 100644 --- a/tests/functional/snapshot/test_restore_online_vol.py +++ b/tests/functional/snapshot/test_restore_online_vol.py @@ -1,4 +1,4 @@ -# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com> +# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -20,7 +20,6 @@ Description: When we try to restore online volume it should fail. 
""" -import sys from glusto.core import Glusto as g @@ -105,17 +104,17 @@ class SnapRSOnline(GlusterBaseClass): for mount_obj in self.mounts: g.log.info("Starting IO on %s:%s", mount_obj.client_system, mount_obj.mountpoint) - cmd = ("/usr/bin/env python%d %s create_deep_dirs_with_files " + cmd = ("/usr/bin/env python %s create_deep_dirs_with_files " "--dirname-start-num %d " "--dir-depth 2 " "--dir-length 2 " "--max-num-of-dirs 2 " "--num-of-files 2 %s" % ( - sys.version_info.major, self.script_upload_path, + self.script_upload_path, self.counter, mount_obj.mountpoint)) - proc = g.run_async(mount_obj.client_system, cmd, user=mount_obj.user) + self.counter += 100 self.all_mounts_procs.append(proc) self.io_validation_complete = False diff --git a/tests/functional/snapshot/test_snap_clone_from_snap.py b/tests/functional/snapshot/test_snap_clone_from_snap.py index acb02b7dd..b976cdb41 100644 --- a/tests/functional/snapshot/test_snap_clone_from_snap.py +++ b/tests/functional/snapshot/test_snap_clone_from_snap.py @@ -1,4 +1,4 @@ -# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com> +# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -21,8 +21,6 @@ Test Cases in this module tests the Creation of clone volume from snapshot. """ -import sys - from glusto.core import Glusto as g from glustolibs.gluster.exceptions import ExecutionError @@ -92,9 +90,9 @@ class SnapshotCloneValidate(GlusterBaseClass): g.log.info("mounts: %s", self.mounts) all_mounts_procs = [] for mount_obj in self.mounts: - cmd = ("/usr/bin/env python%d %s create_files " + cmd = ("/usr/bin/env python %s create_files " "-f 10 --base-file-name file %s" % ( - sys.version_info.major, self.script_upload_path, + self.script_upload_path, mount_obj.mountpoint)) proc = g.run(self.clients[0], cmd) all_mounts_procs.append(proc) diff --git a/tests/functional/snapshot/test_snap_delete_existing_scheduler.py b/tests/functional/snapshot/test_snap_delete_existing_scheduler.py index d8f2f2988..19ad38e21 100644 --- a/tests/functional/snapshot/test_snap_delete_existing_scheduler.py +++ b/tests/functional/snapshot/test_snap_delete_existing_scheduler.py @@ -1,4 +1,4 @@ -# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com> +# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -14,7 +14,6 @@ # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
-import sys from time import sleep from glusto.core import Glusto as g @@ -181,9 +180,9 @@ class SnapshotDeleteExistingScheduler(GlusterBaseClass): g.log.info("Starting IO on all mounts...") all_mounts_procs = [] for mount_obj in self.mounts: - cmd = ("/usr/bin/env python%d %s create_files " + cmd = ("/usr/bin/env python %s create_files " "-f 10 --base-file-name file %s" % ( - sys.version_info.major, self.script_upload_path, + self.script_upload_path, mount_obj.mountpoint)) proc = g.run_async(mount_obj.client_system, cmd, user=mount_obj.user) diff --git a/tests/functional/snapshot/test_snap_delete_multiple.py b/tests/functional/snapshot/test_snap_delete_multiple.py index 8badee42d..e1be4732f 100644 --- a/tests/functional/snapshot/test_snap_delete_multiple.py +++ b/tests/functional/snapshot/test_snap_delete_multiple.py @@ -1,4 +1,4 @@ -# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com> +# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -22,7 +22,6 @@ Creation of clone from snapshot of volume. """ import os -import sys from glusto.core import Glusto as g @@ -100,7 +99,7 @@ class SnapshotCloneDeleteMultiple(GlusterBaseClass): """ # Perform I/O - def io_operation(): + def io_operation(name): g.log.info("Starting to Perform I/O") all_mounts_procs = [] for mount_obj in self.mounts: @@ -108,10 +107,11 @@ class SnapshotCloneDeleteMultiple(GlusterBaseClass): mount_obj.client_system, mount_obj.mountpoint) # Create files g.log.info('Creating files...') - command = ("/usr/bin/env python%d %s create_files -f 100 " - "--fixed-file-size 1k %s" % ( - sys.version_info.major, self.script_upload_path, - mount_obj.mountpoint)) + fname = "{}-{}".format(mount_obj.client_system, name) + command = ("/usr/bin/env python {} create_files -f 100 " + "--fixed-file-size 1k --base-file-name {}" + " {}".format(self.script_upload_path, + fname, mount_obj.mountpoint)) proc = g.run_async(mount_obj.client_system, command, user=mount_obj.user) all_mounts_procs.append(proc) @@ -218,29 +218,29 @@ class SnapshotCloneDeleteMultiple(GlusterBaseClass): self.assertEqual(ret1, 30, "Failed") ret2 = mount_clone_and_io(self.clone1, self.mpoint1) self.assertEqual(ret2, 0, "Failed to mount volume") - ret = io_operation() + ret = io_operation("first") self.assertEqual(ret, 0, "Failed to perform io") ret3 = create_snap(value2, self.clone1, self.snap2, self.clone2, ret1) self.assertEqual(ret3, 40, "Failed") ret4 = mount_clone_and_io(self.clone2, self.mpoint2) self.assertEqual(ret4, 0, "Failed to mount volume") - ret = io_operation() + ret = io_operation("second") self.assertEqual(ret, 0, "Failed to perform io") ret1 = create_snap(value3, self.clone2, self.snap2, self.clone2, ret3) self.assertEqual(ret1, 0, "Failed to create snapshots") + def tearDown(self): + # Calling GlusterBaseClass teardown + self.get_super_method(self, 'tearDown')() + # delete created snapshots g.log.info("starting to delete all created snapshots") ret, _, _ = snap_delete_all(self.mnode) self.assertEqual(ret, 0, "Failed to delete all snapshots") g.log.info("Successfully deleted all snapshots") - def tearDown(self): - # Calling GlusterBaseClass teardown - self.get_super_method(self, 'tearDown')() - # Disable Activate on create option = {'activate-on-create': 'disable'} ret, _, _ = set_snap_config(self.mnode, option) @@ -251,13 +251,13 @@ class SnapshotCloneDeleteMultiple(GlusterBaseClass): # umount 
clone volume g.log.info("Unmounting clone volume") - ret, _, _ = umount_volume(self.mounts[0].client_system, self.mpoint1) + ret, _, _ = umount_volume(self.clients[0], self.mpoint1) if ret != 0: raise ExecutionError("Failed to unmount clone " "volume %s" % self.clone1) g.log.info("Successfully unmounted clone volume %s", self.clone1) - ret, _, _ = umount_volume(self.mounts[0].client_system, self.mpoint2) + ret, _, _ = umount_volume(self.clients[0], self.mpoint2) if ret != 0: raise ExecutionError("Failed to unmount clone " "volume %s" % self.clone2) diff --git a/tests/functional/snapshot/test_snap_delete_original_volume.py b/tests/functional/snapshot/test_snap_delete_original_volume.py index b10355148..249dab4fa 100644 --- a/tests/functional/snapshot/test_snap_delete_original_volume.py +++ b/tests/functional/snapshot/test_snap_delete_original_volume.py @@ -23,7 +23,7 @@ and delete snapshot and original volume. Validate cloned volume is not affected. """ -import sys +from time import sleep from glusto.core import Glusto as g @@ -38,6 +38,7 @@ from glustolibs.gluster.snap_ops import (snap_create, snap_activate, snap_clone) from glustolibs.misc.misc_libs import upload_scripts +from glustolibs.gluster.mount_ops import umount_volume @runs_on([['replicated', 'distributed-replicated', 'dispersed', @@ -82,25 +83,23 @@ class SnapshotSelfheal(GlusterBaseClass): """ # Perform I/O - g.log.info("Starting to Perform I/O") all_mounts_procs = [] - for mount_obj in self.mounts: - g.log.info("Generating data for %s:" - "%s", mount_obj.client_system, mount_obj.mountpoint) - # Create files - g.log.info('Creating files...') - command = ("/usr/bin/env python%d %s create_files -f 100 " - "--fixed-file-size 1k %s" % ( - sys.version_info.major, self.script_upload_path, - mount_obj.mountpoint)) - proc = g.run_async(mount_obj.client_system, command, - user=mount_obj.user) - all_mounts_procs.append(proc) + g.log.info("Generating data for %s:" + "%s", self.mounts[0].client_system, + self.mounts[0].mountpoint) + # Create files + g.log.info('Creating files...') + command = ("/usr/bin/env python %s create_files -f 100 " + "--fixed-file-size 1k %s" % (self.script_upload_path, + self.mounts[0].mountpoint)) + proc = g.run_async(self.mounts[0].client_system, command, + user=self.mounts[0].user) + all_mounts_procs.append(proc) self.io_validation_complete = False # Validate IO self.assertTrue( - validate_io_procs(all_mounts_procs, self.mounts), + validate_io_procs(all_mounts_procs, self.mounts[0]), "IO failed on some of the clients" ) self.io_validation_complete = True @@ -140,6 +139,9 @@ class SnapshotSelfheal(GlusterBaseClass): g.log.info("Clone Volume %s created successfully from snapshot " "%s", self.clone, self.snap) + # After cloning a volume wait for 5 second to start the volume + sleep(5) + # Validate clone volumes are started: g.log.info("starting to Validate clone volumes are started") ret, _, _ = volume_start(self.mnode, self.clone) @@ -147,6 +149,16 @@ class SnapshotSelfheal(GlusterBaseClass): "%s" % self.clone)) g.log.info("Volume %s started successfully", self.clone) + for mount_obj in self.mounts: + # Unmount Volume + g.log.info("Starting to Unmount Volume %s", self.volname) + ret = umount_volume(mount_obj.client_system, + mount_obj.mountpoint, + mtype=self.mount_type) + self.assertTrue(ret, + ("Failed to Unmount Volume %s" % self.volname)) + g.log.info("Successfully Unmounted Volume %s", self.volname) + # Delete original volume g.log.info("deleting original volume") ret = cleanup_volume(self.mnode, self.volname) 
@@ -197,10 +209,3 @@ class SnapshotSelfheal(GlusterBaseClass): if not ret: raise ExecutionError("Failed to delete the cloned volume") g.log.info("Successful in deleting Cloned volume") - - # Unmount and cleanup-volume - g.log.info("Starting to Unmount and cleanup-volume") - ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts) - if not ret: - raise ExecutionError("Failed to Unmount and Cleanup Volume") - g.log.info("Successful in Unmount Volume and Cleanup Volume") diff --git a/tests/functional/snapshot/test_snap_delete_snap_of_volume.py b/tests/functional/snapshot/test_snap_delete_snap_of_volume.py index 6273647ce..afea29379 100644 --- a/tests/functional/snapshot/test_snap_delete_snap_of_volume.py +++ b/tests/functional/snapshot/test_snap_delete_snap_of_volume.py @@ -1,4 +1,4 @@ -# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com> +# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -47,6 +47,12 @@ class SnapshotDeleteSnapVolume(GlusterBaseClass): # Calling GlusterBaseClass tearDown self.get_super_method(self, 'tearDown')() + # delete all snapshot created + g.log.info("Deleting all snapshots created") + ret, _, _ = snap_delete_all(self.mnode) + self.assertEqual(ret, 0, "Failed to delete snapshots") + g.log.info("All Snapshots deleted successfully") + # Unmount and cleanup-volume g.log.info("Unmount and cleanup-volume") ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts) @@ -102,9 +108,3 @@ class SnapshotDeleteSnapVolume(GlusterBaseClass): "%s" % (self.snap1, self.volname))) g.log.info("Snapshot %s created successfully" " for volume %s", self.snap1, self.volname) - - # delete all snapshot created - g.log.info("Deleting all snapshots created") - ret, _, _ = snap_delete_all(self.mnode) - self.assertEqual(ret, 0, "Failed to delete snapshots") - g.log.info("All Snapshots deleted successfully") diff --git a/tests/functional/snapshot/test_snap_glusterd_down.py b/tests/functional/snapshot/test_snap_glusterd_down.py index 70cf765c5..d18dbe409 100644 --- a/tests/functional/snapshot/test_snap_glusterd_down.py +++ b/tests/functional/snapshot/test_snap_glusterd_down.py @@ -1,4 +1,4 @@ -# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com> +# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -21,14 +21,13 @@ Test Cases in this module tests the snapshot activation and deactivation status when glusterd is down. 
""" -import time from glusto.core import Glusto as g from glustolibs.gluster.exceptions import ExecutionError from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on -from glustolibs.gluster.peer_ops import is_peer_connected +from glustolibs.gluster.peer_ops import wait_for_peers_to_connect from glustolibs.gluster.gluster_init import (stop_glusterd, start_glusterd, - is_glusterd_running) + wait_for_glusterd_to_start) from glustolibs.gluster.snap_ops import (snap_create, get_snap_info_by_snapname, get_snap_list, snap_deactivate, @@ -121,15 +120,9 @@ class SnapshotGlusterddown(GlusterBaseClass): # Check Glusterd status g.log.info("Check glusterd running or not") - count = 0 - while count < 80: - ret = is_glusterd_running(self.servers[1]) - if ret == 1: - break - time.sleep(2) - count += 2 - self.assertEqual(ret, 1, "Unexpected: glusterd running on node %s" % - self.servers[1]) + self.assertFalse( + wait_for_glusterd_to_start(self.servers[1]), + "glusterd is still running on %s" % self.servers[1]) g.log.info("Expected: Glusterd not running on node %s", self.servers[1]) @@ -158,15 +151,9 @@ class SnapshotGlusterddown(GlusterBaseClass): # Check Glusterd status g.log.info("Check glusterd running or not") - count = 0 - while count < 80: - ret = is_glusterd_running(self.servers[1]) - if ret: - break - time.sleep(2) - count += 2 - self.assertEqual(ret, 0, "glusterd not running on node %s " - % self.servers[1]) + self.assertTrue( + wait_for_glusterd_to_start(self.servers[1]), + "glusterd is still running on %s" % self.servers[1]) g.log.info("glusterd is running on %s node", self.servers[1]) @@ -183,15 +170,9 @@ class SnapshotGlusterddown(GlusterBaseClass): # Check all the peers are in connected state g.log.info("Validating all the peers are in connected state") - for servers in self.servers: - count = 0 - while count < 80: - ret = is_peer_connected(self.mnode, servers) - if ret: - break - time.sleep(2) - count += 2 - self.assertTrue(ret, "All the nodes are not in cluster") + self.assertTrue( + wait_for_peers_to_connect(self.mnode, self.servers), + "glusterd is still running on %s" % self.servers) g.log.info("Successfully validated all the peers") def tearDown(self): @@ -202,3 +183,6 @@ class SnapshotGlusterddown(GlusterBaseClass): if not ret: raise ExecutionError("Failed to umount the vol & cleanup Volume") g.log.info("Successful in umounting the volume and Cleanup") + + # Calling GlusterBaseClass tearDown + self.get_super_method(self, 'tearDown')() diff --git a/tests/functional/snapshot/test_snap_info_glusterd_restart.py b/tests/functional/snapshot/test_snap_info_glusterd_restart.py index a9fc7aa82..0d0c1253e 100644 --- a/tests/functional/snapshot/test_snap_info_glusterd_restart.py +++ b/tests/functional/snapshot/test_snap_info_glusterd_restart.py @@ -1,4 +1,4 @@ -# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com> +# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -14,123 +14,128 @@ # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -""" -Description: - -Test Cases in this module tests the -snapshot information after glusterd -is restarted. 
- -""" from glusto.core import Glusto as g from glustolibs.gluster.exceptions import ExecutionError -from glustolibs.gluster.gluster_base_class import GlusterBaseClass -from glustolibs.gluster.gluster_base_class import runs_on -from glustolibs.gluster.gluster_init import (restart_glusterd, - is_glusterd_running) -from glustolibs.gluster.snap_ops import (snap_create, - get_snap_info, - get_snap_info_by_volname, - get_snap_info_by_snapname) +from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on +from glustolibs.gluster.gluster_init import ( + wait_for_glusterd_to_start, + restart_glusterd) +from glustolibs.gluster.peer_ops import wait_for_peers_to_connect +from glustolibs.gluster.snap_ops import ( + snap_create, + get_snap_info, + get_snap_info_by_volname, + get_snap_info_by_snapname) @runs_on([['replicated', 'distributed-replicated', 'dispersed', 'distributed', 'distributed-dispersed'], - ['glusterfs', 'nfs', 'cifs']]) -class SnapshotInfo(GlusterBaseClass): - - @classmethod - def setUpClass(cls): - cls.get_super_method(cls, 'setUpClass')() - cls.snap1 = "snap1" - cls.snap2 = "snap2" + ['glusterfs']]) +class TestSnapshotInfoGlusterdRestart(GlusterBaseClass): + """ + Test Cases in this module tests the snapshot information + after glusterd is restarted. + """ def setUp(self): + self.get_super_method(self, 'setUp')() # SettingUp volume and Mounting the volume - self.get_super_method(self, 'setUp')() - g.log.info("Starting to SetUp Volume") - ret = self.setup_volume_and_mount_volume(mounts=self.mounts) + ret = self.setup_volume() if not ret: raise ExecutionError("Failed to setup volume %s" % self.volname) g.log.info("Volume %s has been setup successfully", self.volname) + self.snapshots = [('snap-test-snap-info-gd-restart-%s-%s' + % (self.volname, i))for i in range(0, 2)] def snapshot_info(self): - # Check snapshot info using snap name - g.log.info("Checking snapshot info using snap name") - snap_info_chk = get_snap_info_by_snapname(self.mnode, - self.snap1) - self.assertIsNotNone(snap_info_chk, "Failed to get snap info") - self.assertEqual(snap_info_chk['name'], "%s" % self.snap1, - "Failed to show snapshot info for %s" - % self.snap1) - g.log.info("Successfully checked snapshot info for %s", self.snap1) + """ + This section checks the snapshot information: + * Using snapname + * Using volname + * Without using snapname or volname + """ + # Check snapshot info using snapname + for snap in self.snapshots: + snap_info_chk = get_snap_info_by_snapname(self.mnode, snap) + self.assertIsNotNone(snap_info_chk, "Failed to get snap info") + self.assertEqual(snap_info_chk['name'], "%s" % snap, + "Failed to show snapshot info for %s" % snap) + g.log.info("Successfully validated snapshot info for %s", snap) # Check snapshot info using volname - g.log.info("Checking snapshot info using volname") snap_vol_info = get_snap_info_by_volname(self.mnode, self.volname) self.assertIsNotNone(snap_vol_info, "Failed to get snap info") - self.assertEqual(snap_vol_info['originVolume']['name'], "%s" - % self.volname, - "Failed to show snapshot info for %s" - % self.volname) - g.log.info("Successfully checked snapshot info for %s", - self.volname) - - # Validate snapshot information - g.log.info("Validating snapshot information") - info_snaps = get_snap_info(self.mnode) - self.assertIsNotNone(snap_vol_info, "Failed to get snap info") - for snap in range(0, 2): - self.assertEqual(info_snaps[snap]['name'], "snap%s" % snap, - "Failed to validate" - "snap information") - g.log.info("Successfully Validated 
snap Information") + self.assertEqual(snap_vol_info['originVolume']['name'], + "%s" % self.volname, + "Failed to show snapshot info for %s" % self.volname) + g.log.info("Successfully validated snapshot info for %s", self.volname) - def test_snap_info(self): + # Validate snapshot information without using snapname or volname + info_snaps = get_snap_info(self.mnode) + self.assertIsNotNone(info_snaps, "Failed to get snap info") + counter = 0 + for snap in self.snapshots: + self.assertEqual(info_snaps[counter]['name'], snap, + "Failed to validate snap information") + counter += 1 + g.log.info("Successfully validated snapshot information") + + def test_snap_info_glusterd_restart(self): """ - 1. Create volumes - 2. create multiple snapshots - 3. Check snapshot info for snapshots created - using snap name, using volume name and - without using snap name and volume name - 4. restart glusterd - 5. follow step 3 + Verify snapshot info before and after glusterd restart + + * Create multiple snapshots + * Check snapshot info + - Without using snapname or volname + - Using snapname + - Using volname + * Restart glusterd on all servers + * Repeat the snapshot info step for all the three scenarios + mentioned above """ # pylint: disable=too-many-statements - # Creating snapshot with description - g.log.info("Starting to Create snapshot") - for count in range(0, 2): - self.snap = "snap%s" % count - ret, _, _ = snap_create(self.mnode, self.volname, - self.snap, + # Create snapshots with description + for snap in self.snapshots: + ret, _, _ = snap_create(self.mnode, self.volname, snap, description='$p3C!@l C#@R@cT#R$') self.assertEqual(ret, 0, ("Failed to create snapshot for volume %s" % self.volname)) - g.log.info("Snapshot %s created successfully" - " for volume %s", self.snap, self.volname) - self.snapshot_info() - - # Restart Glusterd on all node - g.log.info("Restarting Glusterd on all node") - ret = restart_glusterd(self.servers) - self.assertTrue(ret, "Failed to stop glusterd") - g.log.info("Successfully stopped glusterd on all node") + g.log.info("Snapshot %s created successfully for volume %s", + snap, self.volname) - # Check Glusterd status - g.log.info("Check glusterd running or not") - ret = is_glusterd_running(self.servers) - self.assertEqual(ret, 0, "glusterd running on node ") - g.log.info("glusterd is not running") + # Perform the snapshot info tests before glusterd restart + self.snapshot_info() + # Restart Glusterd on all servers + for server in self.servers: + ret = restart_glusterd(server) + self.assertTrue(ret, ("Failed to restart glusterd on node %s" + % server)) + g.log.info("Successfully restarted glusterd on node %s", server) + + # Wait for glusterd to be online and validate glusterd running on all + # server nodes + self.assertTrue( + wait_for_glusterd_to_start(self.servers), + "Unexpected: glusterd not up on one or more of the nodes") + g.log.info("Glusterd is up and running on all nodes") + + # Check if peers are connected + self.assertTrue( + wait_for_peers_to_connect(self.mnode, self.servers), + "Unexpected: Peers are not in connected state") + g.log.info("Successful: All peers are in connected state") + + # perform the snapshot info tests after glusterd restart self.snapshot_info() def tearDown(self): + self.get_super_method(self, 'tearDown')() - # Unmount and cleanup original volume - g.log.info("Starting to Unmount Volume and Cleanup Volume") - ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts) + # Unmount and cleanup volume + ret = self.cleanup_volume() if not 
ret: - raise ExecutionError("Failed to umount the vol & cleanup Volume") - g.log.info("Successful in umounting the volume and Cleanup") + raise ExecutionError("Failed to cleanup Volume") + g.log.info("Successful in Cleanup volume") diff --git a/tests/functional/snapshot/test_snap_rebalance.py b/tests/functional/snapshot/test_snap_rebalance.py index d4062f43b..8cbc18ca5 100644 --- a/tests/functional/snapshot/test_snap_rebalance.py +++ b/tests/functional/snapshot/test_snap_rebalance.py @@ -1,4 +1,4 @@ -# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com> +# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -21,7 +21,6 @@ Test Cases in this module tests the Creation of clone from snapshot of one volume. """ -import sys from glusto.core import Glusto as g @@ -175,9 +174,9 @@ class SnapshotRebalance(GlusterBaseClass): # write files to mountpoint g.log.info("Starting IO on %s mountpoint...", self.mount1) all_mounts_procs = [] - cmd = ("/usr/bin/env python%d %s create_files " + cmd = ("/usr/bin/env python %s create_files " "-f 10 --base-file-name file %s" % ( - sys.version_info.major, self.script_upload_path, + self.script_upload_path, self.mount1)) proc = g.run(self.clients[0], cmd) all_mounts_procs.append(proc) diff --git a/tests/functional/snapshot/test_snap_scheduler_status.py b/tests/functional/snapshot/test_snap_scheduler_status.py index fe81c4bc0..a403c7b50 100644 --- a/tests/functional/snapshot/test_snap_scheduler_status.py +++ b/tests/functional/snapshot/test_snap_scheduler_status.py @@ -1,4 +1,4 @@ -# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com> +# Copyright (C) 2017-2020 Red Hat, Inc. 
<http://www.redhat.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -55,6 +55,29 @@ class SnapshotSchedulerStatus(GlusterBaseClass): tearDown for every test """ + # disable snap scheduler + g.log.info("disabling snap scheduler") + ret, _, _ = scheduler_disable(self.mnode) + self.assertEqual(ret, 0, "Unexpected: Failed to disable " + "snapshot scheduler") + g.log.info("Successfully disabled snapshot scheduler") + + # Check snapshot scheduler status + g.log.info("checking status of snapshot scheduler") + for server in self.servers: + count = 0 + while count < 40: + ret, status, _ = scheduler_status(server) + status = status.strip().split(":")[2] + if not ret and status == ' Disabled': + break + sleep(2) + count += 1 + self.assertEqual(ret, 0, "Failed to check status of scheduler" + " on node %s" % server) + g.log.info("Successfully checked scheduler status on %s nodes", + server) + # Check if shared storage is enabled # Disable if true g.log.info("Checking if shared storage is mounted") @@ -134,6 +157,7 @@ class SnapshotSchedulerStatus(GlusterBaseClass): # Initialise snap_scheduler on all nodes g.log.info("Initialising snapshot scheduler on all nodes") count = 0 + sleep(2) while count < 40: ret = scheduler_init(self.servers) if ret: @@ -156,33 +180,8 @@ class SnapshotSchedulerStatus(GlusterBaseClass): count = 0 while count < 40: ret, status, _ = scheduler_status(server) - if ret == 0: - self.assertEqual(status.strip().split(":")[2], ' Enabled', - "Failed to check status of scheduler") - break - sleep(2) - count += 1 - self.assertEqual(ret, 0, "Failed to check status of scheduler" - " on node %s" % server) - g.log.info("Successfully checked scheduler status on %s nodes", - server) - - # disable snap scheduler - g.log.info("disabling snap scheduler") - ret, _, _ = scheduler_disable(self.mnode) - self.assertEqual(ret, 0, "Unexpected: Failed to disable " - "snapshot scheduler") - g.log.info("Successfully disabled snapshot scheduler") - - # Check snapshot scheduler status - g.log.info("checking status of snapshot scheduler") - for server in self.servers: - count = 0 - while count < 40: - ret, status, _ = scheduler_status(server) - if not ret: - self.assertEqual(status.strip().split(":")[2], ' Disabled', - "Failed to check status of scheduler") + status = status.strip().split(":")[2] + if ret == 0 and status == ' Enabled': break sleep(2) count += 1 diff --git a/tests/functional/snapshot/test_snap_self_heal.py b/tests/functional/snapshot/test_snap_self_heal.py index 89f345d66..9cc6d8298 100644 --- a/tests/functional/snapshot/test_snap_self_heal.py +++ b/tests/functional/snapshot/test_snap_self_heal.py @@ -1,4 +1,4 @@ -# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com> +# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -21,7 +21,6 @@ Test Cases in this module tests the Creation of clone from snapshot of volume. 
""" -import sys from glusto.core import Glusto as g @@ -150,9 +149,9 @@ class SnapshotSelfheal(GlusterBaseClass): g.log.info("Starting IO on all mounts...") g.log.info("mounts: %s", self.mount1) all_mounts_procs = [] - cmd = ("/usr/bin/env python%d %s create_files " + cmd = ("/usr/bin/env python %s create_files " "-f 10 --base-file-name file %s" % ( - sys.version_info.major, self.script_upload_path, + self.script_upload_path, self.mount1)) proc = g.run(self.clients[0], cmd) all_mounts_procs.append(proc) @@ -167,10 +166,8 @@ class SnapshotSelfheal(GlusterBaseClass): g.log.info("Starting to bring bricks to offline") bricks_to_bring_offline_dict = (select_bricks_to_bring_offline( self.mnode, self.volname)) - bricks_to_bring_offline = list(filter(None, ( - bricks_to_bring_offline_dict['hot_tier_bricks'] + - bricks_to_bring_offline_dict['cold_tier_bricks'] + - bricks_to_bring_offline_dict['volume_bricks']))) + bricks_to_bring_offline = bricks_to_bring_offline_dict['volume_bricks'] + g.log.info("Brick to bring offline: %s ", bricks_to_bring_offline) ret = bring_bricks_offline(self.clone, bricks_to_bring_offline) self.assertTrue(ret, "Failed to bring the bricks offline") @@ -197,9 +194,9 @@ class SnapshotSelfheal(GlusterBaseClass): g.log.info("Starting IO on all mounts...") g.log.info("mounts: %s", self.mount1) all_mounts_procs = [] - cmd = ("/usr/bin/env python%d %s create_files " + cmd = ("/usr/bin/env python %s create_files " "-f 10 --base-file-name file %s" % ( - sys.version_info.major, self.script_upload_path, + self.script_upload_path, self.mount1)) proc = g.run(self.clients[0], cmd) all_mounts_procs.append(proc) diff --git a/tests/functional/snapshot/test_snap_uss.py b/tests/functional/snapshot/test_snap_uss.py index 5371b7439..69887934e 100644 --- a/tests/functional/snapshot/test_snap_uss.py +++ b/tests/functional/snapshot/test_snap_uss.py @@ -1,4 +1,4 @@ -# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com> +# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -21,7 +21,6 @@ Test Cases in this module tests the Creation of snapshot and USS feature. 
""" -import sys from glusto.core import Glusto as g @@ -91,9 +90,9 @@ class SnapshotUssSnap(GlusterBaseClass): g.log.info("mounts: %s", self.mounts) all_mounts_procs = [] for mount_obj in self.mounts: - cmd = ("/usr/bin/env python%d %s create_files " + cmd = ("/usr/bin/env python %s create_files " "-f 10 --base-file-name file %s" % ( - sys.version_info.major, self.script_upload_path, + self.script_upload_path, mount_obj.mountpoint)) proc = g.run_async(mount_obj.client_system, cmd, user=mount_obj.user) @@ -130,9 +129,9 @@ class SnapshotUssSnap(GlusterBaseClass): ret = mkdir(mount_obj.client_system, self.mpoint) self.assertTrue(ret, "Failed to create .snaps directory") g.log.info("Successfully created .snaps directory") - cmd = ("/usr/bin/env python%d %s create_files " + cmd = ("/usr/bin/env python %s create_files " "-f 10 --base-file-name foo %s" % ( - sys.version_info.major, self.script_upload_path, + self.script_upload_path, self.mpoint)) proc = g.run_async(mount_obj.client_system, cmd, user=mount_obj.user) diff --git a/tests/functional/snapshot/test_snap_uss_snapd.py b/tests/functional/snapshot/test_snap_uss_snapd.py new file mode 100644 index 000000000..e008a679a --- /dev/null +++ b/tests/functional/snapshot/test_snap_uss_snapd.py @@ -0,0 +1,377 @@ +# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com> +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +""" + Description: + Test Cases in this module tests the USS functionality + before and after snapd is killed. validate snapd after + volume is started with force option. 
+""" +from os import path +from time import sleep +from glusto.core import Glusto as g +from glustolibs.gluster.exceptions import ExecutionError +from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on +from glustolibs.misc.misc_libs import upload_scripts +from glustolibs.gluster.brick_libs import get_all_bricks +from glustolibs.gluster.mount_ops import (mount_volume, + is_mounted, unmount_mounts) +from glustolibs.gluster.volume_ops import (volume_start, + get_volume_info, + volume_stop) +from glustolibs.gluster.volume_libs import (log_volume_info_and_status, + cleanup_volume) +from glustolibs.gluster.snap_ops import (get_snap_list, + snap_create, + snap_activate, + snap_clone, terminate_snapd_on_node) +from glustolibs.gluster.uss_ops import (is_snapd_running, is_uss_enabled, + enable_uss, disable_uss, + uss_list_snaps) +from glustolibs.gluster.mount_ops import create_mount_objs +from glustolibs.io.utils import validate_io_procs, view_snaps_from_mount + + +@runs_on([['replicated', 'distributed-replicated', + 'dispersed', 'distributed-dispersed', 'distributed'], + ['glusterfs']]) +class SnapshotSnapdCloneVol(GlusterBaseClass): + + @classmethod + def setUpClass(cls): + cls.get_super_method(cls, 'setUpClass')() + cls.mount1 = [] + cls.mpoint = "/mnt/clone1" + cls.server_list = [] + cls.server_lists = [] + + # Upload io scripts for running IO on mounts + cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/" + "file_dir_ops.py") + ret = upload_scripts(cls.clients, cls.script_upload_path) + if not ret: + raise ExecutionError("Failed to upload IO scripts " + "to clients %s" % cls.clients) + g.log.info("Successfully uploaded IO scripts to clients %s", + cls.clients) + + def setUp(self): + + self.snap = 'test_snap_clone_snapd-snap' + self.clone_vol1 = 'clone-of-test_snap_clone_snapd-clone1' + # SettingUp volume and Mounting the volume + self.get_super_method(self, 'setUp')() + ret = self.setup_volume_and_mount_volume(mounts=self.mounts) + if not ret: + raise ExecutionError("Failed to setup volume %s" % self.volname) + g.log.info("Volume %s has been setup successfully", self.volname) + + def validate_snapd(self, check_condition=True): + """ Validate snapd running """ + for server in self.server_list: + ret = is_snapd_running(server, self.clone_vol1) + if check_condition: + self.assertTrue( + ret, "Unexpected: Snapd is Not running for " + "volume %s on node %s" % (self.clone_vol1, server)) + g.log.info( + "Snapd Running for volume %s " + "on node: %s", self.clone_vol1, server) + else: + self.assertFalse( + ret, "Unexpected: Snapd is running for" + "volume %s on node %s" % (self.clone_vol1, server)) + g.log.info("Expected: Snapd is not Running for volume" + " %s on node: %s", self.clone_vol1, server) + + def check_snaps(self): + """ Check snapshots under .snaps folder """ + ret, _, _ = uss_list_snaps(self.clients[0], self.mpoint) + self.assertEqual(ret, 0, "Unexpected: .snaps directory not found") + g.log.info("Expected: .snaps directory is present") + + def validate_uss(self): + """ Validate USS running """ + ret = is_uss_enabled(self.mnode, self.clone_vol1) + self.assertTrue(ret, "USS is disabled in clone volume " + "%s" % self.clone_vol1) + g.log.info("USS enabled in cloned Volume %s", self.clone_vol1) + + def validate_snaps(self): + """ Validate snapshots under .snaps folder """ + for count in range(0, 40): + ret = view_snaps_from_mount(self.mount1, self.snaps_list) + if ret: + break + sleep(2) + count += 1 + self.assertTrue(ret, "Failed to lists .snaps folder") + 
g.log.info("Successfully validated snapshots from .snaps folder") + + def test_snap_clone_snapd(self): + """ + Steps: + + 1. create a volume + 2. Create a snapshots and activate + 3. Clone the snapshot and mount it + 4. Check for snapd daemon + 5. enable uss and validate snapd + 5. stop cloned volume + 6. Validate snapd + 7. start cloned volume + 8. validate snapd + 9. Create 5 more snapshot + 10. Validate total number of + snapshots created. + 11. Activate 5 snapshots + 12. Enable USS + 13. Validate snapd + 14. kill snapd on all nodes + 15. validate snapd running + 16. force start clone volume + 17. validate snaps inside .snaps directory + """ + # pylint: disable=too-many-statements, too-many-locals + + # Starting I/O + all_mounts_procs = [] + for mount_obj in self.mounts: + cmd = ("/usr/bin/env python %s create_files " + "-f 10 --base-file-name file %s" % ( + self.script_upload_path, + mount_obj.mountpoint)) + proc = g.run_async(mount_obj.client_system, cmd, + user=mount_obj.user) + all_mounts_procs.append(proc) + + # Validate I/O + ret = validate_io_procs(all_mounts_procs, self.mounts) + self.assertTrue(ret, "IO failed on some of the clients") + g.log.info("IO is successful on all mounts") + + # Creating snapshot + ret, _, _ = snap_create(self.mnode, self.volname, self.snap) + self.assertEqual(ret, 0, ("Failed to create snapshot for volume %s" + % self.volname)) + g.log.info("Snapshot %s created successfully for " + "volume %s", self.snap, self.volname) + + # Activating created snapshots + ret, _, _ = snap_activate(self.mnode, self.snap) + self.assertEqual(ret, 0, ("Failed to activate snapshot %s" + % self.snap)) + g.log.info("Snapshot snap%s activated successfully", self.snap) + + # Snapshot list + self.assertIsNotNone( + get_snap_list(self.mnode), "Failed to list snapshot") + g.log.info("Snapshot list command Successful") + + # Creating and starting a Clone of snapshot: + ret, _, _ = snap_clone(self.mnode, self.snap, self.clone_vol1) + self.assertEqual(ret, 0, "Failed to clone %s" % self.clone_vol1) + g.log.info("Clone volume %s created successfully", self.clone_vol1) + + # Start the clone volumes + ret, _, _ = volume_start(self.mnode, self.clone_vol1) + self.assertEqual(ret, 0, "Failed to start %s" % self.clone_vol1) + g.log.info("%s started successfully", self.clone_vol1) + + # Form server list + brick_list = get_all_bricks(self.mnode, self.clone_vol1) + for bricks in brick_list: + self.server_lists.append(bricks.split(":")[0]) + self.server_list = list(set(self.server_lists)) + + # Get volume info + vol_info = get_volume_info(self.mnode, self.clone_vol1) + self.assertIsNotNone(vol_info, "Failed to get vol info") + g.log.info("Successfully in getting vol info") + + # Redefining mounts for cloned volume + self.mount_points, self.mounts_dict_list = [], [] + for client in self.all_clients_info: + mount = { + 'protocol': self.mount_type, + 'server': self.mnode, + 'volname': self.volname, + 'client': self.all_clients_info[client], + 'mountpoint': (path.join( + "%s" % self.mpoint)), + 'options': '' + } + self.mounts_dict_list.append(mount) + self.mount1 = create_mount_objs(self.mounts_dict_list) + self.mount_points.append(self.mpoint) + g.log.info("Successfully made entry in self.mount1") + + # FUSE mount clone1 volume + for mount_obj in self.mounts: + ret, _, _ = mount_volume(self.clone_vol1, self.mount_type, + self.mpoint, + self.mnode, mount_obj.client_system) + self.assertEqual(ret, 0, "Volume mount failed for clone1") + g.log.info("%s mounted Successfully", self.clone_vol1) + + # 
Validate clone volume is mounted or not + ret = is_mounted(self.clone_vol1, self.mpoint, self.mnode, + mount_obj.client_system, self.mount_type) + self.assertTrue(ret, "Volume not mounted on mount point: " + "%s" % self.mpoint) + g.log.info("Volume %s mounted on %s", self.clone_vol1, self.mpoint) + + # Log Cloned Volume information + ret = log_volume_info_and_status(self.mnode, self.clone_vol1) + self.assertTrue("Failed to Log Info and Status of Volume " + "%s" % self.clone_vol1) + g.log.info("Successfully Logged Info and Status") + + # Validate snapd running on all nodes + self.validate_snapd(check_condition=False) + + # Enable USS + ret, _, _ = enable_uss(self.mnode, self.clone_vol1) + self.assertEqual(ret, 0, "Failed to enable USS on cloned volume") + g.log.info("Successfully enabled USS on Cloned volume") + + # Validate USS running + self.validate_uss() + + # Validate snapd running on all nodes + self.validate_snapd() + + # Stop cloned volume + ret, _, _ = volume_stop(self.mnode, self.clone_vol1) + self.assertEqual(ret, 0, "Failed to stop cloned volume " + "%s" % self.clone_vol1) + g.log.info("Successfully Stopped Cloned volume %s", self.clone_vol1) + + # Validate snapd running on all nodes + self.validate_snapd(check_condition=False) + + # Start cloned volume + ret, _, _ = volume_start(self.mnode, self.clone_vol1) + self.assertEqual(ret, 0, "Failed to start cloned volume" + " %s" % self.clone_vol1) + g.log.info("Successfully started cloned volume" + " %s", self.clone_vol1) + + # Validate snapd running on all nodes + self.validate_snapd() + + # Create 5 snapshots + self.snaps_list = [('test_snap_clone_snapd-snap%s' + % i)for i in range(0, 5)] + for snapname in self.snaps_list: + ret, _, _ = snap_create(self.mnode, self.clone_vol1, + snapname) + self.assertEqual(ret, 0, ("Failed to create snapshot for volume" + " %s" % self.clone_vol1)) + g.log.info("Snapshot %s created successfully for volume " + "%s", snapname, self.clone_vol1) + + # Validate USS running + self.validate_uss() + + # Check snapshot under .snaps directory + self.check_snaps() + + # Activate Snapshots + for snapname in self.snaps_list: + ret, _, _ = snap_activate(self.mnode, snapname) + self.assertEqual(ret, 0, ("Failed to activate snapshot %s" + % snapname)) + g.log.info("Snapshot %s activated " + "successfully", snapname) + + # Validate USS running + self.validate_uss() + + # Validate snapshots under .snaps folder + self.validate_snaps() + + # Kill snapd on node and validate snapd except management node + for server in self.servers[1:]: + ret, _, _ = terminate_snapd_on_node(server) + self.assertEqual(ret, 0, "Failed to Kill snapd on node %s" + % server) + g.log.info("snapd Killed Successfully on node %s", server) + + # Check snapd running + ret = is_snapd_running(server, self.clone_vol1) + self.assertTrue(ret, "Unexpected: Snapd running on node: " + "%s" % server) + g.log.info("Expected: Snapd is not running on node:%s", server) + + # Check snapshots under .snaps folder + g.log.info("Validating snapshots under .snaps") + ret, _, _ = uss_list_snaps(self.clients[0], self.mpoint) + self.assertEqual(ret, 0, "Target endpoint not connected") + g.log.info("Successfully listed snapshots under .snaps") + + # Kill snapd in management node + ret, _, _ = terminate_snapd_on_node(self.servers[0]) + self.assertEqual(ret, 0, "Failed to Kill snapd on node %s" + % self.servers[0]) + g.log.info("snapd Killed Successfully on node %s", self.servers[0]) + + # Validate snapd running on all nodes + self.validate_snapd(check_condition=False) + 
+ # Validating snapshots under .snaps + ret, _, _ = uss_list_snaps(self.clients[0], self.mpoint) + self.assertNotEqual(ret, 0, "Unexpected: Successfully listed " + "snapshots under .snaps") + g.log.info("Expected: Target endpoint not connected") + + # Start the Cloned volume(force start) + ret, _, _ = volume_start(self.mnode, self.clone_vol1, force=True) + self.assertEqual(ret, 0, "Failed to start cloned volume " + "%s" % self.clone_vol1) + g.log.info("Successfully Started Cloned volume %s", self.clone_vol1) + + # Validate snapd running on all nodes + self.validate_snapd() + + # Validate snapshots under .snaps folder + self.validate_snaps() + + def tearDown(self): + + # Calling GlusterBaseClass tearDown + self.get_super_method(self, 'tearDown')() + + # Disable USS on cloned volume + ret, _, _ = disable_uss(self.mnode, self.clone_vol1) + if ret: + raise ExecutionError("Failed to disable USS on cloned volume") + g.log.info("Successfully disabled USS on Cloned volume") + + # Cleanup cloned volume + ret = unmount_mounts(self.mount1) + if not ret: + raise ExecutionError("Failed to unmount cloned volume") + ret = cleanup_volume(self.mnode, self.clone_vol1) + if not ret: + raise ExecutionError("Failed to unmount and cleanup cloned volume") + g.log.info("Successfully umounted and cleanup cloned volume") + + # Unmount and cleanup-volume + ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts) + if not ret: + raise ExecutionError("Failed to Unmount and Cleanup Volume") + g.log.info("Successful in Unmount Volume and Cleanup Volume") diff --git a/tests/functional/snapshot/test_snap_uss_while_io.py b/tests/functional/snapshot/test_snap_uss_while_io.py index b51e92998..d11c9663c 100644 --- a/tests/functional/snapshot/test_snap_uss_while_io.py +++ b/tests/functional/snapshot/test_snap_uss_while_io.py @@ -1,4 +1,4 @@ -# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com> +# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -21,7 +21,6 @@ Test Cases in this module tests the uss functionality while io is going on. 
""" -import sys from glusto.core import Glusto as g @@ -33,7 +32,8 @@ from glustolibs.io.utils import (validate_io_procs, from glustolibs.gluster.snap_ops import (snap_create, snap_activate, snap_list) -from glustolibs.gluster.uss_ops import (enable_uss, is_uss_enabled, +from glustolibs.gluster.uss_ops import (disable_uss, + enable_uss, is_uss_enabled, is_snapd_running) from glustolibs.misc.misc_libs import upload_scripts @@ -70,6 +70,18 @@ class SnapshotUssWhileIo(GlusterBaseClass): def tearDown(self): + # Validate USS running + g.log.info("Validating USS enabled or disabled") + ret = is_uss_enabled(self.mnode, self.volname) + if not ret: + # Disable USS + ret, _, _ = disable_uss(self.mnode, self.volname) + if not ret: + raise ExecutionError("Failed to disable USS on volume" + "%s" % self.volname) + g.log.info("Successfully disabled USS on volume %s", + self.volname) + # Unmount and cleanup original volume g.log.info("Starting to Unmount Volume and Cleanup Volume") ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts) @@ -77,6 +89,9 @@ class SnapshotUssWhileIo(GlusterBaseClass): raise ExecutionError("Failed to umount the vol & cleanup Volume") g.log.info("Successful in umounting the volume and Cleanup") + # Calling GlusterBaseClass tearDown + self.get_super_method(self, 'tearDown')() + def test_snap_uss_while_io(self): # pylint: disable=too-many-statements """ @@ -125,9 +140,9 @@ class SnapshotUssWhileIo(GlusterBaseClass): "%s", mount_obj.client_system, mount_obj.mountpoint) # Create files g.log.info('Creating files...') - command = ("/usr/bin/env python%d %s create_files -f 100 " + command = ("/usr/bin/env python %s create_files -f 100 " "--fixed-file-size 1M %s" % ( - sys.version_info.major, self.script_upload_path, + self.script_upload_path, mount_obj.mountpoint)) proc = g.run_async(mount_obj.client_system, command, user=mount_obj.user) diff --git a/tests/functional/snapshot/test_snapshot_create.py b/tests/functional/snapshot/test_snapshot_create.py index a8e6c1c47..677199f21 100644 --- a/tests/functional/snapshot/test_snapshot_create.py +++ b/tests/functional/snapshot/test_snapshot_create.py @@ -1,4 +1,4 @@ -# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com> +# Copyright (C) 2017-2020 Red Hat, Inc. 
<http://www.redhat.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -19,7 +19,6 @@ Description : The purpose of this test is to validate snapshot create """ -import sys from glusto.core import Glusto as g @@ -91,20 +90,14 @@ class SnapCreate(GlusterBaseClass): ret, _, _ = snap_delete_all(self.mnode) if ret != 0: raise ExecutionError("Failed to delete all snaps") - self.get_super_method(self, 'tearDown')() - @classmethod - def tearDownClass(cls): - """ - Clean up the volume & mount - """ - g.log.info("Starting volume and mount cleanup") - ret = cls.unmount_volume_and_cleanup_volume(cls.mounts) + # Unmount and cleanup original volume + g.log.info("Starting to Unmount Volume and Cleanup Volume") + ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts) if not ret: - raise ExecutionError("Failed to cleanup volume and mount") - g.log.info("Cleanup successful for the volume and mount") - - cls.get_super_method(cls, 'tearDownClass')() + raise ExecutionError("Failed to umount the vol & cleanup Volume") + g.log.info("Successful in umounting the volume and Cleanup") + self.get_super_method(self, 'tearDown')() def test_validate_snaps_create(self): """ @@ -159,13 +152,13 @@ class SnapCreate(GlusterBaseClass): for mount_obj in self.mounts: g.log.info("Starting IO on %s:%s", mount_obj.client_system, mount_obj.mountpoint) - cmd = ("/usr/bin/env python%d %s create_deep_dirs_with_files " + cmd = ("/usr/bin/env python %s create_deep_dirs_with_files " "--dirname-start-num %d " "--dir-depth 2 " "--dir-length 10 " "--max-num-of-dirs 5 " "--num-of-files 5 %s" % ( - sys.version_info.major, self.script_upload_path, count, + self.script_upload_path, count, mount_obj.mountpoint)) proc = g.run_async(mount_obj.client_system, cmd, user=mount_obj.user) diff --git a/tests/functional/snapshot/test_snapshot_restore.py b/tests/functional/snapshot/test_snapshot_restore.py index 326ebc078..99a82e2b7 100644 --- a/tests/functional/snapshot/test_snapshot_restore.py +++ b/tests/functional/snapshot/test_snapshot_restore.py @@ -1,4 +1,4 @@ -# Copyright (C) 2016-2017 Red Hat, Inc. <http://www.redhat.com> +# Copyright (C) 2016-2020 Red Hat, Inc. <http://www.redhat.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -18,7 +18,6 @@ The purpose of this test is to validate restore of a snapshot. 
""" -import sys from glusto.core import Glusto as g @@ -96,20 +95,14 @@ class SnapRestore(GlusterBaseClass): ret, _, _ = snap_delete_all(self.mnode) if not ret: raise ExecutionError("Snapshot delete failed.") - self.get_super_method(self, 'tearDown')() - @classmethod - def tearDownClass(cls): - """ - Clean up the volume & mount - """ - g.log.info("Starting volume and mount cleanup") - ret = cls.unmount_volume_and_cleanup_volume(cls.mounts) + # Unmount and cleanup-volume + ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts) if not ret: - raise ExecutionError("Failed to cleanup volume and mount") - g.log.info("Cleanup successful for the volume and mount") + raise ExecutionError("Failed to Unmount and Cleanup Volume") + g.log.info("Successful in Unmount Volume and Cleanup Volume") - cls.get_super_method(cls, 'tearDownClass')() + self.get_super_method(self, 'tearDown')() def test_validate_snaps_restore(self): # pylint: disable=too-many-statements @@ -119,13 +112,13 @@ class SnapRestore(GlusterBaseClass): for mount_obj in self.mounts: g.log.info("Starting IO on %s:%s", mount_obj.client_system, mount_obj.mountpoint) - cmd = ("/usr/bin/env python%d %s create_deep_dirs_with_files " + cmd = ("/usr/bin/env python %s create_deep_dirs_with_files " "--dirname-start-num %d " "--dir-depth 2 " "--dir-length 10 " "--max-num-of-dirs 5 " "--num-of-files 5 %s" % ( - sys.version_info.major, self.script_upload_path, count, + self.script_upload_path, count, mount_obj.mountpoint)) proc = g.run_async(mount_obj.client_system, cmd, user=mount_obj.user) @@ -177,13 +170,13 @@ class SnapRestore(GlusterBaseClass): for mount_obj in self.mounts: g.log.info("Starting IO on %s:%s", mount_obj.client_system, mount_obj.mountpoint) - cmd = ("/usr/bin/env python%d %s create_deep_dirs_with_files " + cmd = ("/usr/bin/env python %s create_deep_dirs_with_files " "--dirname-start-num %d " "--dir-depth 2 " "--dir-length 10 " "--max-num-of-dirs 5 " "--num-of-files 5 %s" % ( - sys.version_info.major, self.script_upload_path, count, + self.script_upload_path, count, mount_obj.mountpoint)) proc = g.run_async(mount_obj.client_system, cmd, user=mount_obj.user) @@ -257,13 +250,13 @@ class SnapRestore(GlusterBaseClass): for mount_obj in self.mounts: g.log.info("Starting IO on %s:%s", mount_obj.client_system, mount_obj.mountpoint) - cmd = ("/usr/bin/env python%d %s create_deep_dirs_with_files " + cmd = ("/usr/bin/env python %s create_deep_dirs_with_files " "--dirname-start-num %d " "--dir-depth 2 " "--dir-length 10 " "--max-num-of-dirs 5 " "--num-of-files 5 %s" % ( - sys.version_info.major, self.script_upload_path, count, + self.script_upload_path, count, mount_obj.mountpoint)) proc = g.run_async(mount_obj.client_system, cmd, user=mount_obj.user) diff --git a/tests/functional/snapshot/test_uss_brick_down.py b/tests/functional/snapshot/test_uss_brick_down.py index 4fad0942f..fbd9644ed 100644 --- a/tests/functional/snapshot/test_uss_brick_down.py +++ b/tests/functional/snapshot/test_uss_brick_down.py @@ -1,4 +1,4 @@ -# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com> +# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -20,7 +20,6 @@ Description: enable USS on the volume when brick is down. 
""" -import sys from glusto.core import Glusto as g @@ -116,13 +115,13 @@ class SnapUssBrickDown(GlusterBaseClass): for mount_obj in self.mounts: g.log.info("Starting IO on %s:%s", mount_obj.client_system, mount_obj.mountpoint) - cmd = ("/usr/bin/env python%d %s create_deep_dirs_with_files " + cmd = ("/usr/bin/env python %s create_deep_dirs_with_files " "--dirname-start-num %d " "--dir-depth 2 " "--dir-length 2 " "--max-num-of-dirs 2 " "--num-of-files 2 %s" % ( - sys.version_info.major, self.script_upload_path, + self.script_upload_path, self.counter, mount_obj.mountpoint)) proc = g.run_async(mount_obj.client_system, cmd, diff --git a/tests/functional/snapshot/test_uss_snap_active_deactive.py b/tests/functional/snapshot/test_uss_snap_active_deactive.py index 2233a88a8..fca78f43d 100644 --- a/tests/functional/snapshot/test_uss_snap_active_deactive.py +++ b/tests/functional/snapshot/test_uss_snap_active_deactive.py @@ -1,4 +1,4 @@ -# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com> +# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -21,7 +21,6 @@ Description: snap should not. """ -import sys from glusto.core import Glusto as g @@ -122,13 +121,13 @@ class SnapUssActiveD(GlusterBaseClass): for mount_obj in self.mounts: g.log.info("Starting IO on %s:%s", mount_obj.client_system, mount_obj.mountpoint) - cmd = ("/usr/bin/env python%d %s create_deep_dirs_with_files " + cmd = ("/usr/bin/env python %s create_deep_dirs_with_files " "--dirname-start-num %d " "--dir-depth 2 " "--dir-length 2 " "--max-num-of-dirs 2 " "--num-of-files 2 %s" % ( - sys.version_info.major, self.script_upload_path, + self.script_upload_path, self.counter, mount_obj.mountpoint)) proc = g.run_async(mount_obj.client_system, cmd, diff --git a/tests/functional/snapshot/test_uss_snap_restore.py b/tests/functional/snapshot/test_uss_snap_restore.py new file mode 100644 index 000000000..45de07c93 --- /dev/null +++ b/tests/functional/snapshot/test_uss_snap_restore.py @@ -0,0 +1,239 @@ +# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com> +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +from glusto.core import Glusto as g +from glustolibs.gluster.exceptions import ExecutionError +from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on +from glustolibs.io.utils import ( + wait_for_io_to_complete, + get_mounts_stat) +from glustolibs.gluster.snap_ops import ( + snap_create, + get_snap_list, + snap_activate, + snap_restore_complete) +from glustolibs.gluster.uss_ops import ( + enable_uss, + is_uss_enabled, + get_uss_list_snaps, + is_snapd_running, + disable_uss) +from glustolibs.misc.misc_libs import upload_scripts +from glustolibs.gluster.volume_libs import ( + verify_all_process_of_volume_are_online) + + +@runs_on([['replicated', 'distributed-replicated', 'dispersed', + 'distributed', 'distributed-dispersed'], + ['glusterfs', 'nfs']]) +class TestUssSnapRestore(GlusterBaseClass): + + @classmethod + def setUpClass(cls): + # Calling GlusterBaseClass setUpClass + cls.get_super_method(cls, 'setUpClass')() + + # Upload IO scripts for running IO on mounts + cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/" + "file_dir_ops.py") + ret = upload_scripts(cls.clients, [cls.script_upload_path]) + if not ret: + raise ExecutionError("Failed to upload IO scripts to clients %s" + % cls.clients) + g.log.info("Successfully uploaded IO scripts to clients %s", + cls.clients) + + def setUp(self): + # Calling GlusterBaseClass setUp + self.get_super_method(self, 'setUp')() + + self.all_mounts_procs = [] + + # Setup Volume and Mount Volume + ret = self.setup_volume_and_mount_volume(mounts=self.mounts) + if not ret: + raise ExecutionError("Failed to Setup_Volume and Mount_Volume") + g.log.info("Successful in Setup Volume and Mount Volume") + self.snapshots = [('snap-test-uss-snap-restore-%s-%s' + % (self.volname, i))for i in range(0, 2)] + + def tearDown(self): + + # Disable uss for volume + ret, _, _ = disable_uss(self.mnode, self.volname) + if ret: + raise ExecutionError("Failed to disable uss") + g.log.info("Successfully disabled uss for volume %s", self.volname) + + # Unmount and cleanup original volume + ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts) + if not ret: + raise ExecutionError("Failed to umount and cleanup Volume") + g.log.info("Successful in umounting the volume and Cleanup") + + # Calling GlusterBaseClass teardown + self.get_super_method(self, 'tearDown')() + + def test_uss_snap_restore(self): + """ + Description: + This test case will validate USS after Snapshot restore. + The restored snapshot should not be listed under the '.snaps' + directory. + + * Perform I/O on mounts + * Enable USS on volume + * Validate USS is enabled + * Create a snapshot + * Activate the snapshot + * Perform some more I/O + * Create another snapshot + * Activate the second + * Restore volume to the second snapshot + * From mount point validate under .snaps + - first snapshot should be listed + - second snapshot should not be listed + """ + + # pylint: disable=too-many-statements + # Perform I/O + cmd = ( + "/usr/bin/env python %s create_files " + "-f 10 --base-file-name firstfiles %s" + % (self.script_upload_path, + self.mounts[0].mountpoint)) + proc = g.run_async( + self.mounts[0].client_system, cmd, user=self.mounts[0].user) + self.all_mounts_procs.append(proc) + + # Wait for IO to complete and validate IO + self.assertTrue( + wait_for_io_to_complete(self.all_mounts_procs, self.mounts[0]), + "IO failed on %s" % self.mounts[0]) + g.log.info("IO is successful on all mounts") + + # Get stat of all the files/dirs created. 
+ ret = get_mounts_stat(self.mounts) + self.assertTrue(ret, "Stat failed on some of the clients") + g.log.info("Successfully got stat of all files/dirs created") + + # Enable USS + ret, _, _ = enable_uss(self.mnode, self.volname) + self.assertEqual(ret, 0, "Failed to enable USS on volume") + g.log.info("Successfully enabled USS on volume") + + # Validate USS is enabled + ret = is_uss_enabled(self.mnode, self.volname) + self.assertTrue(ret, "USS is disabled on volume %s" % self.volname) + g.log.info("USS enabled on volume %s", self.volname) + + # Create a snapshot + ret, _, _ = snap_create(self.mnode, self.volname, self.snapshots[0]) + self.assertEqual(ret, 0, ("Failed to create snapshot for %s" + % self.volname)) + g.log.info("Snapshot %s created successfully for volume %s", + self.snapshots[0], self.volname) + + # Check for number of snaps using snap_list it should be 1 now + snap_list = get_snap_list(self.mnode) + self.assertEqual(1, len(snap_list), "No of snaps not consistent " + "for volume %s" % self.volname) + g.log.info("Successfully validated number of snapshots") + + # Activate the snapshot + ret, _, _ = snap_activate(self.mnode, self.snapshots[0]) + self.assertEqual(ret, 0, ("Failed to activate snapshot %s" + % self.snapshots[0])) + g.log.info("Snapshot %s activated successfully", self.snapshots[0]) + + # Perform I/O + self.all_mounts_procs = [] + cmd = ( + "/usr/bin/env python %s create_files " + "-f 10 --base-file-name secondfiles %s" + % (self.script_upload_path, + self.mounts[0].mountpoint)) + proc = g.run_async( + self.mounts[0].client_system, cmd, user=self.mounts[0].user) + self.all_mounts_procs.append(proc) + + # Wait for IO to complete and validate IO + self.assertTrue( + wait_for_io_to_complete(self.all_mounts_procs, self.mounts[0]), + "IO failed on %s" % self.mounts[0]) + g.log.info("IO is successful on all mounts") + + # Get stat of all the files/dirs created. 
+ ret = get_mounts_stat(self.mounts) + self.assertTrue(ret, "Stat failed on some of the clients") + g.log.info("Successfully got stat of all files/dirs created") + + # Create another snapshot + ret, _, _ = snap_create(self.mnode, self.volname, self.snapshots[1]) + self.assertEqual(ret, 0, ("Failed to create snapshot for volume %s" + % self.volname)) + g.log.info("Snapshot %s created successfully for volume %s", + self.snapshots[1], self.volname) + + # Check for number of snaps using snap_list it should be 2 now + snap_list = get_snap_list(self.mnode) + self.assertEqual(2, len(snap_list), "No of snaps not consistent " + "for volume %s" % self.volname) + g.log.info("Successfully validated number of snapshots") + + # Activate the second snapshot + ret, _, _ = snap_activate(self.mnode, self.snapshots[1]) + self.assertEqual(ret, 0, ("Failed to activate snapshot %s" + % self.snapshots[1])) + g.log.info("Snapshot %s activated successfully", self.snapshots[1]) + + # Restore volume to the second snapshot + ret = snap_restore_complete( + self.mnode, self.volname, self.snapshots[1]) + self.assertTrue(ret, ("Failed to restore snap %s on the " + "volume %s" % (self.snapshots[1], self.volname))) + g.log.info("Restore of volume is successful from %s on " + "volume %s", self.snapshots[1], self.volname) + + # Verify all volume processes are online + ret = verify_all_process_of_volume_are_online(self.mnode, self.volname) + self.assertTrue(ret, "Failed: All volume processes are not online") + g.log.info("All volume processes are online") + ret = is_snapd_running(self.mnode, self.volname) + self.assertTrue( + ret, "Failed: snapd is not running for volume %s" % self.volname) + g.log.info("Successful: snapd is running") + + # List activated snapshots under the .snaps directory + snap_dir_list = get_uss_list_snaps(self.mounts[0].client_system, + self.mounts[0].mountpoint) + self.assertIsNotNone( + snap_dir_list, "Failed to list snapshots under .snaps directory") + g.log.info("Successfully gathered list of snapshots under the .snaps" + " directory") + + # Check for first snapshot as it should get listed here + self.assertIn(self.snapshots[0], snap_dir_list, + ("Unexpected : %s not listed under .snaps " + "directory" % self.snapshots[0])) + g.log.info("Activated Snapshot %s listed Successfully", + self.snapshots[0]) + + # Check for second snapshot as it should not get listed here + self.assertNotIn(self.snapshots[1], snap_dir_list, + ("Unexpected : %s listed in .snaps " + "directory" % self.snapshots[1])) + g.log.info("Restored Snapshot %s not listed ", self.snapshots[1]) diff --git a/tests/functional/snapshot/test_validate_snaps_dir_over_uss.py b/tests/functional/snapshot/test_validate_snaps_dir_over_uss.py index 599933f4d..c1e42517f 100644 --- a/tests/functional/snapshot/test_validate_snaps_dir_over_uss.py +++ b/tests/functional/snapshot/test_validate_snaps_dir_over_uss.py @@ -1,4 +1,4 @@ -# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com> +# Copyright (C) 2017-2020 Red Hat, Inc. 
<http://www.redhat.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -23,7 +23,6 @@ """ -import sys from glusto.core import Glusto as g @@ -90,23 +89,14 @@ class TestValidateUss(GlusterBaseClass): g.log.info("Successfully disabled uss for volume" "%s", self.volname) - # Calling GlusterBaseClass tearDown - self.get_super_method(self, 'tearDown')() - - @classmethod - def tearDownClass(cls): - """ - Clean up the volume & mount - """ - # stopping the volume and clean up the volume - g.log.info("Starting to Cleanup Volume") - ret = cls.unmount_volume_and_cleanup_volume(cls.mounts) + # Unmount and cleanup-volume + ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts) if not ret: - raise ExecutionError("Failed to Cleanup Volume and mount") - g.log.info("Successful in Cleanup Volume and mount") + raise ExecutionError("Failed to Unmount and Cleanup Volume") + g.log.info("Successful in Unmount Volume and Cleanup Volume") - # calling GlusterBaseClass tearDownClass - cls.get_super_method(cls, 'tearDownClass')() + # Calling GlusterBaseClass tearDown + self.get_super_method(self, 'tearDown')() def test_validate_snaps_dir_over_uss(self): @@ -125,13 +115,13 @@ class TestValidateUss(GlusterBaseClass): for mount_obj in self.mounts: g.log.info("Starting IO on %s:%s", mount_obj.client_system, mount_obj.mountpoint) - cmd = ("/usr/bin/env python%d %s create_deep_dirs_with_files " + cmd = ("/usr/bin/env python %s create_deep_dirs_with_files " "--dirname-start-num %d " "--dir-depth 2 " "--dir-length 2 " "--max-num-of-dirs 2 " "--num-of-files 2 %s" % ( - sys.version_info.major, self.script_upload_path, + self.script_upload_path, self.counter, mount_obj.mountpoint)) proc = g.run_async(mount_obj.client_system, cmd, @@ -204,9 +194,9 @@ class TestValidateUss(GlusterBaseClass): g.log.info("Starting IO on all mounts...") all_mounts_procs = [] for mount_obj in self.mounts: - cmd = ("/usr/bin/env python%d %s create_files " + cmd = ("/usr/bin/env python %s create_files " "-f 10 --base-file-name file %s/.snaps/abc/" % ( - sys.version_info.major, self.script_upload_path, + self.script_upload_path, mount_obj.mountpoint)) proc = g.run_async(mount_obj.client_system, cmd, user=mount_obj.user) diff --git a/tests/functional/snapshot/test_validate_snapshot_max_limit.py b/tests/functional/snapshot/test_validate_snapshot_max_limit.py index 518ec02a6..7832ac5d4 100644 --- a/tests/functional/snapshot/test_validate_snapshot_max_limit.py +++ b/tests/functional/snapshot/test_validate_snapshot_max_limit.py @@ -1,4 +1,4 @@ -# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com> +# Copyright (C) 2017-2020 Red Hat, Inc. 
<http://www.redhat.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -43,7 +43,6 @@ Steps : """ -import sys from glusto.core import Glusto as g @@ -136,13 +135,13 @@ class SnapCreateMax(GlusterBaseClass): for mount_obj in self.mounts: g.log.info("Starting IO on %s:%s", mount_obj.client_system, mount_obj.mountpoint) - cmd = ("/usr/bin/env python%d %s create_deep_dirs_with_files " + cmd = ("/usr/bin/env python %s create_deep_dirs_with_files " "--dirname-start-num %d " "--dir-depth 2 " "--dir-length 10 " "--max-num-of-dirs 5 " "--num-of-files 5 %s" % ( - sys.version_info.major, self.script_upload_path, count, + self.script_upload_path, count, mount_obj.mountpoint)) proc = g.run_async(mount_obj.client_system, cmd, user=mount_obj.user) diff --git a/tests/functional/snapshot/test_validate_snapshot_rebalance.py b/tests/functional/snapshot/test_validate_snapshot_rebalance.py index 71baff186..a03064eca 100644 --- a/tests/functional/snapshot/test_validate_snapshot_rebalance.py +++ b/tests/functional/snapshot/test_validate_snapshot_rebalance.py @@ -1,4 +1,4 @@ -# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com> +# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -20,7 +20,6 @@ Description : The purpose of this test is to validate snapshot create """ -import sys from glusto.core import Glusto as g @@ -114,13 +113,13 @@ class SnapCreateRebal(GlusterBaseClass): for mount_obj in self.mounts: g.log.info("Starting IO on %s:%s", mount_obj.client_system, mount_obj.mountpoint) - cmd = ("/usr/bin/env python%d %s create_deep_dirs_with_files " + cmd = ("/usr/bin/env python %s create_deep_dirs_with_files " "--dirname-start-num %d " "--dir-depth 2 " "--dir-length 10 " "--max-num-of-dirs 5 " "--num-of-files 5 %s" % ( - sys.version_info.major, self.script_upload_path, count, + self.script_upload_path, count, mount_obj.mountpoint)) proc = g.run_async(mount_obj.client_system, cmd, user=mount_obj.user) |
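
Two changes recur across the files in this patch: the IO commands stop embedding the interpreter major version (the uploaded file_dir_ops.py is now launched through an unversioned /usr/bin/env python), and the old class-level tearDownClass cleanup is folded into each test's tearDown. The sketch below shows the resulting shape of a test class under those assumptions; the class name and the run_io_on_first_mount helper are illustrative only, while the glusto/glustolibs calls are the ones the patch itself uses.

from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass


class SnapTestPatternSketch(GlusterBaseClass):
    """Illustrative only: shows the IO invocation and cleanup pattern
    this patch applies to the snapshot tests."""

    def run_io_on_first_mount(self):
        # Unversioned interpreter: the same command line works whether the
        # client's 'python' resolves to Python 2 or Python 3.
        cmd = ("/usr/bin/env python %s create_files "
               "-f 10 --base-file-name firstfiles %s"
               % (self.script_upload_path, self.mounts[0].mountpoint))
        return g.run_async(self.mounts[0].client_system, cmd,
                           user=self.mounts[0].user)

    def tearDown(self):
        # Cleanup moves from the removed tearDownClass into tearDown, so
        # every test unmounts and deletes its own volume.
        ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
        if not ret:
            raise ExecutionError("Failed to umount the vol & cleanup Volume")
        g.log.info("Successful in umounting the volume and Cleanup")

        # Calling GlusterBaseClass tearDown
        self.get_super_method(self, 'tearDown')()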
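
test_snap_uss_while_io.py and the new test_uss_snap_restore.py both gain a tearDown step that switches USS off before the volume is cleaned up. A minimal standalone version of that guard is sketched below; the helper name is hypothetical, the uss_ops functions are the ones the patch imports, and a non-zero return from disable_uss is treated as failure.

from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.uss_ops import disable_uss, is_uss_enabled


def disable_uss_if_enabled(mnode, volname):
    # Hypothetical helper: only call disable_uss when USS is still
    # reported as enabled on the volume.
    if is_uss_enabled(mnode, volname):
        ret, _, _ = disable_uss(mnode, volname)
        if ret:
            raise ExecutionError("Failed to disable USS on volume %s"
                                 % volname)
        g.log.info("Successfully disabled USS on volume %s", volname)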
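
The new test_uss_snap_restore.py ends by checking that, after the volume is restored to its second snapshot, only the first snapshot is still visible under the .snaps directory. A condensed sketch of that final check is below; the function and argument names are invented for illustration, while snap_restore_complete and get_uss_list_snaps are the helpers the new test itself calls.

from glustolibs.gluster.snap_ops import snap_restore_complete
from glustolibs.gluster.uss_ops import get_uss_list_snaps


def verify_uss_after_restore(mnode, volname, mount, kept_snap, restored_snap):
    # Restore the volume to the later snapshot using the same helper the
    # new test calls.
    assert snap_restore_complete(mnode, volname, restored_snap), (
        "Failed to restore snap %s on volume %s" % (restored_snap, volname))

    # List the snapshots exposed through USS on the mount point.
    snap_dir_list = get_uss_list_snaps(mount.client_system, mount.mountpoint)
    assert snap_dir_list is not None, (
        "Failed to list snapshots under the .snaps directory")

    # The earlier snapshot stays listed; the restored one must not appear.
    assert kept_snap in snap_dir_list, (
        "%s not listed under .snaps" % kept_snap)
    assert restored_snap not in snap_dir_list, (
        "%s unexpectedly listed under .snaps" % restored_snap)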