From fb5145be2db1a7c96b008af8a40e3b7b18df9673 Mon Sep 17 00:00:00 2001
From: Nigel Babu
Date: Mon, 5 Mar 2018 15:49:23 +0530
Subject: Fix up coding style issues in tests

Change-Id: I14609030983d4485dbce5a4ffed1e0353e3d1bc7
---
 tests/functional/__init__.py | 0
 tests/functional/afr/heal/__init__.py | 0
 .../heal/test_heal_info_while_accessing_file.py | 228 +++
 tests/functional/afr/heal/test_self_heal.py | 150 +++--
 .../afr/heal/test_self_heal_daemon_process.py | 645 ++++++++++++++++++++
 tests/functional/afr/test_client_side_quorum.py | 158 +++--
 .../afr/test_heal_info_while_accessing_file.py | 230 -------
 .../afr/test_self_heal_daemon_process.py | 663 ---------------------
 .../brick_cases/test_replica3_to_arbiter.py | 10 +-
 tests/functional/bvt/__init__.py | 0
 tests/functional/bvt/test_basic.py | 4 +-
 tests/functional/bvt/test_cvt.py | 35 +-
 tests/functional/bvt/test_vvt.py | 6 +-
 tests/functional/dht/__init__.py | 0
 .../test_negative_exercise_add_brick_command.py | 12 +-
 tests/functional/glusterd/test_add_brick.py | 134 +++++
 .../glusterd/test_add_brick_functionality.py | 134 -----
 tests/functional/glusterd/test_concurrent_set.py | 23 +-
 tests/functional/glusterd/test_nfs_quorum.py | 173 ++++++
 .../glusterd/test_nfs_quorum_on_all_vol_types.py | 172 ------
 tests/functional/glusterd/test_peer_detach.py | 39 +-
 tests/functional/glusterd/test_probe_glusterd.py | 10 +-
 .../test_quorum_related_messages_in_syslog.py | 292 ---------
 tests/functional/glusterd/test_quorum_syslog.py | 294 +++++++++
 .../functional/glusterd/test_rebalance_new_node.py | 162 +++++
 .../test_rebalance_status_from_new_node.py | 162 -----
 tests/functional/glusterd/test_volume_create.py | 24 +-
 tests/functional/glusterd/test_volume_delete.py | 10 +-
 tests/functional/glusterd/test_volume_get.py | 45 +-
 tests/functional/glusterd/test_volume_op.py | 148 -----
 .../functional/glusterd/test_volume_operations.py | 125 +++-
 tests/functional/glusterd/test_volume_reset.py | 116 ++--
 tests/functional/glusterd/test_volume_status.py | 29 +-
 tests/functional/nfs_ganesha/__init__.py | 0
 .../nfs_ganesha/acls/test_nfs_ganesha_acls.py | 113 ----
 .../exports/test_nfs_ganesha_volume_exports.py | 563 -----------------
 .../nfs_ganesha/sanity/test_nfs_ganesha_sanity.py | 101 ----
 .../nfs_ganesha/test_nfs_ganesha_acls.py | 114 ++++
 .../test_nfs_ganesha_run_io_multiple_clients.py | 7 -
 .../nfs_ganesha/test_nfs_ganesha_sanity.py | 101 ++++
 .../nfs_ganesha/test_nfs_ganesha_volume_exports.py | 556 +++++++++++++++++
 tests/functional/quota/__init__.py | 0
 tests/functional/quota/test_non_existent_dir.py | 8 +-
 tests/functional/snapshot/test_256_snapshots.py | 172 ++++++
 tests/functional/snapshot/test_auto_delete.py | 16 +-
 .../functional/snapshot/test_create_brick_down.py | 145 +++++
 .../snapshot/test_snap_create_brickdown.py | 146 -----
 tests/functional/snapshot/test_snapshot_create.py | 205 +++++++
 tests/functional/snapshot/test_snapshot_restore.py | 279 +++++++++
 .../snapshot/test_validate_snapshot_256.py | 174 ------
 .../snapshot/test_validate_snapshot_create.py | 204 -------
 .../snapshot/test_validate_snapshot_restore.py | 279 ---------
 52 files changed, 3643 insertions(+), 3773 deletions(-)
 create mode 100644 tests/functional/__init__.py
 create mode 100644 tests/functional/afr/heal/__init__.py
 create mode 100644 tests/functional/afr/heal/test_heal_info_while_accessing_file.py
 mode change 100755 => 100644 tests/functional/afr/heal/test_self_heal.py
 create mode 100644 tests/functional/afr/heal/test_self_heal_daemon_process.py
 delete mode 100644 tests/functional/afr/test_heal_info_while_accessing_file.py
 delete mode 100644 tests/functional/afr/test_self_heal_daemon_process.py
 create mode 100644 tests/functional/bvt/__init__.py
 create mode 100644 tests/functional/dht/__init__.py
 create mode 100644 tests/functional/glusterd/test_add_brick.py
 delete mode 100644 tests/functional/glusterd/test_add_brick_functionality.py
 create mode 100644 tests/functional/glusterd/test_nfs_quorum.py
 delete mode 100644 tests/functional/glusterd/test_nfs_quorum_on_all_vol_types.py
 delete mode 100644 tests/functional/glusterd/test_quorum_related_messages_in_syslog.py
 create mode 100644 tests/functional/glusterd/test_quorum_syslog.py
 create mode 100644 tests/functional/glusterd/test_rebalance_new_node.py
 delete mode 100644 tests/functional/glusterd/test_rebalance_status_from_new_node.py
 delete mode 100644 tests/functional/glusterd/test_volume_op.py
 create mode 100644 tests/functional/nfs_ganesha/__init__.py
 delete mode 100644 tests/functional/nfs_ganesha/acls/test_nfs_ganesha_acls.py
 delete mode 100644 tests/functional/nfs_ganesha/exports/test_nfs_ganesha_volume_exports.py
 delete mode 100644 tests/functional/nfs_ganesha/sanity/test_nfs_ganesha_sanity.py
 create mode 100644 tests/functional/nfs_ganesha/test_nfs_ganesha_acls.py
 create mode 100644 tests/functional/nfs_ganesha/test_nfs_ganesha_sanity.py
 create mode 100644 tests/functional/nfs_ganesha/test_nfs_ganesha_volume_exports.py
 create mode 100644 tests/functional/quota/__init__.py
 create mode 100644 tests/functional/snapshot/test_256_snapshots.py
 create mode 100644 tests/functional/snapshot/test_create_brick_down.py
 delete mode 100644 tests/functional/snapshot/test_snap_create_brickdown.py
 create mode 100644 tests/functional/snapshot/test_snapshot_create.py
 create mode 100644 tests/functional/snapshot/test_snapshot_restore.py
 delete mode 100644 tests/functional/snapshot/test_validate_snapshot_256.py
 delete mode 100644 tests/functional/snapshot/test_validate_snapshot_create.py
 delete mode 100644 tests/functional/snapshot/test_validate_snapshot_restore.py

diff --git a/tests/functional/__init__.py b/tests/functional/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/functional/afr/heal/__init__.py b/tests/functional/afr/heal/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/functional/afr/heal/test_heal_info_while_accessing_file.py b/tests/functional/afr/heal/test_heal_info_while_accessing_file.py
new file mode 100644
index 000000000..965adbdc1
--- /dev/null
+++ b/tests/functional/afr/heal/test_heal_info_while_accessing_file.py
@@ -0,0 +1,228 @@
+# Copyright (C) 2015-2016 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ +from glusto.core import Glusto as g +from glustolibs.gluster.gluster_base_class import (GlusterBaseClass, runs_on) +from glustolibs.gluster.exceptions import ExecutionError +from glustolibs.gluster.volume_libs import get_subvols +from glustolibs.gluster.brick_libs import (bring_bricks_offline, + are_bricks_offline, + get_all_bricks) + +from glustolibs.gluster.heal_ops import get_heal_info_summary +from glustolibs.misc.misc_libs import upload_scripts +from glustolibs.io.utils import (validate_io_procs, + list_all_files_and_dirs_mounts, + wait_for_io_to_complete) + + +@runs_on([['distributed-replicated'], + ['glusterfs']]) +class TestSelfHeal(GlusterBaseClass): + """ + Description: + Test cases related to + healing in default configuration of the volume + """ + + @classmethod + def setUpClass(cls): + # Calling GlusterBaseClass setUpClass + GlusterBaseClass.setUpClass.im_func(cls) + + # Upload io scripts for running IO on mounts + g.log.info("Upload io scripts to clients %s for running IO on mounts", + cls.clients) + script_local_path = ("/usr/share/glustolibs/io/scripts/" + "file_dir_ops.py") + cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/" + "file_dir_ops.py") + ret = upload_scripts(cls.clients, [script_local_path]) + if not ret: + raise ExecutionError("Failed to upload IO scripts to clients %s" + % cls.clients) + g.log.info("Successfully uploaded IO scripts to clients %s", + cls.clients) + + cls.counter = 1 + # int: Value of counter is used for dirname-start-num argument for + # file_dir_ops.py create_deep_dirs_with_files. + + # The --dir-length argument value for file_dir_ops.py + # create_deep_dirs_with_files is set to 10 (refer to the cmd in setUp + # method). This means every mount will create + # 10 top level dirs. For every mountpoint/testcase to create new set of + # dirs, we are incrementing the counter by --dir-length value i.e 10 in + # this test suite. + + # If we are changing the --dir-length to new value, ensure the counter + # is also incremented by same value to create new set of files/dirs. 
+ + def setUp(self): + # Calling GlusterBaseClass setUp + GlusterBaseClass.setUp.im_func(self) + + self.all_mounts_procs = [] + self.io_validation_complete = False + + # Setup Volume and Mount Volume + g.log.info("Starting to Setup Volume and Mount Volume") + ret = self.setup_volume_and_mount_volume(mounts=self.mounts, + volume_create_force=False) + if not ret: + raise ExecutionError("Failed to Setup_Volume and Mount_Volume") + g.log.info("Successful in Setup Volume and Mount Volume") + + self.bricks_list = get_all_bricks(self.mnode, self.volname) + + def tearDown(self): + """ + If test method failed before validating IO, tearDown waits for the + IO's to complete and checks for the IO exit status + + Cleanup and umount volume + """ + if not self.io_validation_complete: + g.log.info("Wait for IO to complete as IO validation did not " + "succeed in test method") + ret = wait_for_io_to_complete(self.all_mounts_procs, self.mounts) + if not ret: + raise ExecutionError("IO failed on some of the clients") + g.log.info("IO is successful on all mounts") + + # List all files and dirs created + g.log.info("List all files and directories:") + ret = list_all_files_and_dirs_mounts(self.mounts) + if not ret: + raise ExecutionError("Failed to list all files and dirs") + g.log.info("Listing all files and directories is successful") + + # Cleanup and umount volume + g.log.info("Starting to Unmount Volume and Cleanup Volume") + ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts) + if not ret: + raise ExecutionError("Failed to umount the vol & cleanup Volume") + g.log.info("Successful in umounting the volume and Cleanup") + + # Calling GlusterBaseClass teardown + GlusterBaseClass.tearDown.im_func(self) + + def test_heal_info_shouldnot_list_files_being_accessed(self): + """ + - bring brick 1 offline + - create files and validate IO + - get entries before accessing file + - get first filename from active subvol without offline bricks + - access and modify the file + - while accessing - get entries + - Compare entries before accessing and while accessing + - validate IO + """ + + # Bring 1-st brick offline + brick_to_bring_offline = [self.bricks_list[0]] + g.log.info('Bringing bricks %s offline...', brick_to_bring_offline) + ret = bring_bricks_offline(self.volname, brick_to_bring_offline) + self.assertTrue(ret, 'Failed to bring bricks %s offline' + % brick_to_bring_offline) + + ret = are_bricks_offline(self.mnode, self.volname, + brick_to_bring_offline) + self.assertTrue(ret, 'Bricks %s are not offline' + % brick_to_bring_offline) + g.log.info('Bringing bricks %s offline is successful', + brick_to_bring_offline) + + # Creating files on client side + for mount_obj in self.mounts: + g.log.info("Generating data for %s:%s", + mount_obj.client_system, mount_obj.mountpoint) + + # Creating files + cmd = ("python %s create_files -f 100 %s" + % (self.script_upload_path, mount_obj.mountpoint)) + + proc = g.run_async(mount_obj.client_system, cmd, + user=mount_obj.user) + self.all_mounts_procs.append(proc) + + # Validate IO + g.log.info("Wait for IO to complete and validate IO ...") + ret = validate_io_procs(self.all_mounts_procs, self.mounts) + self.io_validation_complete = True + self.assertTrue(ret, "IO failed on some of the clients") + g.log.info("IO is successful on all mounts") + + # Get entries before accessing file + g.log.info("Getting entries_before_accessing file...") + entries_before_accessing = get_heal_info_summary( + self.mnode, self.volname) + self.assertNotEqual(entries_before_accessing, None, 
+ 'Can`t get heal info summary') + g.log.info( + "Getting entries_before_accessing file finished successfully") + + # Get filename to access from active subvol without offline bricks + # Get last subvol + subvols = get_subvols(self.mnode, self.volname) + subvol_without_offline_brick = subvols['volume_subvols'][-1] + + # Get first brick server and brick path + # and get first file from filelist + subvol_mnode, mnode_brick = subvol_without_offline_brick[0].split(':') + ret, file_list, _ = g.run(subvol_mnode, 'ls %s' % mnode_brick) + file_to_edit = file_list.splitlines()[0] + + # Access and modify the file + g.log.info("Start modifying IO on all mounts...") + self.all_mounts_procs = [] + for mount_obj in self.mounts: + g.log.info("Modifying IO on %s:%s", mount_obj.client_system, + mount_obj.mountpoint) + + cmd = ("cd %s/ ; " + "dd if=/dev/zero of=%s bs=1G count=1" + % (mount_obj.mountpoint, file_to_edit)) + proc = g.run_async(mount_obj.client_system, cmd, + user=mount_obj.user) + self.all_mounts_procs.append(proc) + g.log.info("IO on %s:%s is modified successfully", + mount_obj.client_system, mount_obj.mountpoint) + self.io_validation_complete = False + + # Get entries while accessing file + g.log.info("Getting entries while accessing file...") + entries_while_accessing = get_heal_info_summary( + self.mnode, self.volname) + self.assertNotEqual(entries_before_accessing, None, + 'Can`t get heal info summary') + g.log.info("Getting entries while accessing file " + "finished successfully") + + # Compare dicts before accessing and while accessing + g.log.info('Comparing entries before modifying and while modifying...') + ret = cmp(entries_before_accessing, entries_while_accessing) + self.assertEqual(ret, 0, 'Entries before modifying and while modifying' + 'are not equal') + g.log.info('Comparison entries before modifying and while modifying' + 'finished successfully.') + + # Validate IO + g.log.info("Wait for IO to complete and validate IO ...") + ret = validate_io_procs(self.all_mounts_procs, self.mounts) + self.assertTrue(ret, "IO failed on some of the clients") + self.io_validation_complete = True + g.log.info("IO is successful on all mounts") diff --git a/tests/functional/afr/heal/test_self_heal.py b/tests/functional/afr/heal/test_self_heal.py old mode 100755 new mode 100644 index 7837d958c..b2e52e392 --- a/tests/functional/afr/heal/test_self_heal.py +++ b/tests/functional/afr/heal/test_self_heal.py @@ -55,8 +55,8 @@ class TestSelfHeal(GlusterBaseClass): GlusterBaseClass.setUpClass.im_func(cls) # Upload io scripts for running IO on mounts - g.log.info("Upload io scripts to clients %s for running IO on mounts" - % cls.clients) + g.log.info("Upload io scripts to clients %s for running IO on mounts", + cls.clients) script_local_path = ("/usr/share/glustolibs/io/scripts/" "file_dir_ops.py") cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/" @@ -65,23 +65,22 @@ class TestSelfHeal(GlusterBaseClass): if not ret: raise ExecutionError("Failed to upload IO scripts to clients %s" % cls.clients) - g.log.info("Successfully uploaded IO scripts to clients %s" - % cls.clients) + g.log.info("Successfully uploaded IO scripts to clients %s", + cls.clients) cls.counter = 1 - """int: Value of counter is used for dirname-start-num argument for - file_dir_ops.py create_deep_dirs_with_files. - - The --dir-length argument value for - file_dir_ops.py create_deep_dirs_with_files is set to 10 - (refer to the cmd in setUp method). This means every mount will create - 10 top level dirs. 
For every mountpoint/testcase to create new set of - dirs, we are incrementing the counter by --dir-length value i.e 10 - in this test suite. - - If we are changing the --dir-length to new value, ensure the counter - is also incremented by same value to create new set of files/dirs. - """ + # int: Value of counter is used for dirname-start-num argument for + # file_dir_ops.py create_deep_dirs_with_files. + + # The --dir-length argument value for file_dir_ops.py + # create_deep_dirs_with_files is set to 10 (refer to the cmd in setUp + # method). This means every mount will create + # 10 top level dirs. For every mountpoint/testcase to create new set of + # dirs, we are incrementing the counter by --dir-length value i.e 10 + # in this test suite. + + # If we are changing the --dir-length to new value, ensure the counter + # is also incremented by same value to create new set of files/dirs. def setUp(self): # Calling GlusterBaseClass setUp @@ -129,13 +128,6 @@ class TestSelfHeal(GlusterBaseClass): # Calling GlusterBaseClass teardown GlusterBaseClass.tearDown.im_func(self) - @classmethod - def tearDownClass(cls): - """tearDownClass. This will be executed once per class. - """ - # Calling GlusterBaseClass tearDownClass. - GlusterBaseClass.tearDownClass.im_func(cls) - def test_data_self_heal_daemon_off(self): """ Test Data-Self-Heal (heal command) @@ -166,22 +158,22 @@ class TestSelfHeal(GlusterBaseClass): in cycle - validate IO """ + # pylint: disable=too-many-statements # Setting options g.log.info('Setting options...') options = {"metadata-self-heal": "off", "entry-self-heal": "off", - "data-self-heal": "off", - } + "data-self-heal": "off"} ret = set_volume_options(self.mnode, self.volname, options) self.assertTrue(ret, 'Failed to set options %s' % options) - g.log.info("Successfully set %s for volume %s" - % (options, self.volname)) + g.log.info("Successfully set %s for volume %s", + options, self.volname) # Creating files on client side for mount_obj in self.mounts: - g.log.info("Generating data for %s:%s" - % (mount_obj.client_system, mount_obj.mountpoint)) + g.log.info("Generating data for %s:%s", + mount_obj.client_system, mount_obj.mountpoint) # Create files g.log.info('Creating files...') command = ("python %s create_files -f 100 --fixed-file-size 1k %s" @@ -217,12 +209,12 @@ class TestSelfHeal(GlusterBaseClass): bricks_to_bring_offline_dict = (select_bricks_to_bring_offline( self.mnode, self.volname)) bricks_to_bring_offline = filter(None, ( - bricks_to_bring_offline_dict['hot_tier_bricks'] + - bricks_to_bring_offline_dict['cold_tier_bricks'] + - bricks_to_bring_offline_dict['volume_bricks'])) + bricks_to_bring_offline_dict['hot_tier_bricks'] + + bricks_to_bring_offline_dict['cold_tier_bricks'] + + bricks_to_bring_offline_dict['volume_bricks'])) # Bring brick offline - g.log.info('Bringing bricks %s offline...' 
% bricks_to_bring_offline) + g.log.info('Bringing bricks %s offline...', bricks_to_bring_offline) ret = bring_bricks_offline(self.volname, bricks_to_bring_offline) self.assertTrue(ret, 'Failed to bring bricks %s offline' % bricks_to_bring_offline) @@ -231,8 +223,8 @@ class TestSelfHeal(GlusterBaseClass): bricks_to_bring_offline) self.assertTrue(ret, 'Bricks %s are not offline' % bricks_to_bring_offline) - g.log.info('Bringing bricks %s offline is successful' - % bricks_to_bring_offline) + g.log.info('Bringing bricks %s offline is successful', + bricks_to_bring_offline) # Get areequal after getting bricks offline g.log.info('Getting areequal after getting bricks offline...') @@ -252,8 +244,8 @@ class TestSelfHeal(GlusterBaseClass): # Modify the data self.all_mounts_procs = [] for mount_obj in self.mounts: - g.log.info("Modifying data for %s:%s" % - (mount_obj.client_system, mount_obj.mountpoint)) + g.log.info("Modifying data for %s:%s", mount_obj.client_system, + mount_obj.mountpoint) # Create files g.log.info('Creating files...') command = ("python %s create_files -f 100 --fixed-file-size 10k %s" @@ -272,13 +264,13 @@ class TestSelfHeal(GlusterBaseClass): g.log.info("IO is successful on all mounts") # Bring brick online - g.log.info('Bringing bricks %s online...' % bricks_to_bring_offline) + g.log.info('Bringing bricks %s online...', bricks_to_bring_offline) ret = bring_bricks_online(self.mnode, self.volname, bricks_to_bring_offline) self.assertTrue(ret, 'Failed to bring bricks %s online' % bricks_to_bring_offline) - g.log.info('Bringing bricks %s online is successful' - % bricks_to_bring_offline) + g.log.info('Bringing bricks %s online is successful', + bricks_to_bring_offline) # Setting options g.log.info('Setting options...') @@ -300,7 +292,7 @@ class TestSelfHeal(GlusterBaseClass): ret = verify_all_process_of_volume_are_online(self.mnode, self.volname) self.assertTrue(ret, ("Volume %s : All process are not online" % self.volname)) - g.log.info("Volume %s : All process are online" % self.volname) + g.log.info("Volume %s : All process are online", self.volname) # Wait for self-heal-daemons to be online g.log.info("Waiting for self-heal-daemons to be online") @@ -333,10 +325,10 @@ class TestSelfHeal(GlusterBaseClass): self.all_servers_info) self.assertTrue(ret, ("Failed to expand the volume %s", self.volname)) g.log.info("Expanding volume is successful on " - "volume %s" % self.volname) + "volume %s", self.volname) # Do rebalance - ret, out, err = rebalance_start(self.mnode, self.volname) + ret, _, _ = rebalance_start(self.mnode, self.volname) self.assertEqual(ret, 0, 'Failed to start rebalance') g.log.info('Rebalance is started') @@ -347,8 +339,8 @@ class TestSelfHeal(GlusterBaseClass): # Create 1k files self.all_mounts_procs = [] for mount_obj in self.mounts: - g.log.info("Modifying data for %s:%s" % - (mount_obj.client_system, mount_obj.mountpoint)) + g.log.info("Modifying data for %s:%s", mount_obj.client_system, + mount_obj.mountpoint) # Create files g.log.info('Creating files...') command = ("python %s create_files -f 1000 %s" @@ -363,7 +355,7 @@ class TestSelfHeal(GlusterBaseClass): bricks_list = get_all_bricks(self.mnode, self.volname) for brick in bricks_list: # Bring brick offline - g.log.info('Bringing bricks %s offline' % brick) + g.log.info('Bringing bricks %s offline', brick) ret = bring_bricks_offline(self.volname, [brick]) self.assertTrue(ret, 'Failed to bring bricks %s offline' % brick) @@ -371,17 +363,17 @@ class TestSelfHeal(GlusterBaseClass): [brick]) 
self.assertTrue(ret, 'Bricks %s are not offline' % brick) - g.log.info('Bringing bricks %s offline is successful' - % bricks_to_bring_offline) + g.log.info('Bringing bricks %s offline is successful', + bricks_to_bring_offline) # Bring brick online - g.log.info('Bringing bricks %s online...' % brick) + g.log.info('Bringing bricks %s online...', brick) ret = bring_bricks_online(self.mnode, self.volname, [brick]) self.assertTrue(ret, 'Failed to bring bricks %s online' % bricks_to_bring_offline) - g.log.info('Bringing bricks %s online is successful' - % bricks_to_bring_offline) + g.log.info('Bringing bricks %s online is successful', + bricks_to_bring_offline) # Wait for volume processes to be online g.log.info("Wait for volume processes to be online") @@ -398,7 +390,7 @@ class TestSelfHeal(GlusterBaseClass): self.volname) self.assertTrue(ret, ("Volume %s : All process are not online" % self.volname)) - g.log.info("Volume %s : All process are online" % self.volname) + g.log.info("Volume %s : All process are online", self.volname) # Wait for self-heal-daemons to be online g.log.info("Waiting for self-heal-daemons to be online") @@ -442,13 +434,13 @@ class TestSelfHeal(GlusterBaseClass): - get areequal after getting bricks online and compare with arequal before bringing bricks online """ + # pylint: disable=too-many-statements # Setting options g.log.info('Setting options...') options = {"metadata-self-heal": "off", "entry-self-heal": "off", - "data-self-heal": "off", - } + "data-self-heal": "off"} ret = set_volume_options(self.mnode, self.volname, options) self.assertTrue(ret, 'Failed to set options %s' % options) g.log.info("Options " @@ -461,9 +453,8 @@ class TestSelfHeal(GlusterBaseClass): g.log.info("Starting IO on all mounts...") self.all_mounts_procs = [] for mount_obj in self.mounts: - g.log.info("Starting IO on %s:%s" - % (mount_obj.client_system, - mount_obj.mountpoint)) + g.log.info("Starting IO on %s:%s", mount_obj.client_system, + mount_obj.mountpoint) cmd = ("python %s create_deep_dirs_with_files " "--dirname-start-num %d " "--dir-length 2 " @@ -476,9 +467,8 @@ class TestSelfHeal(GlusterBaseClass): user=mount_obj.user) self.all_mounts_procs.append(proc) self.counter = self.counter + 10 - g.log.info("IO on %s:%s is started successfully" - % (mount_obj.client_system, - mount_obj.mountpoint)) + g.log.info("IO on %s:%s is started successfully", + mount_obj.client_system, mount_obj.mountpoint) self.io_validation_complete = False # Validate IO @@ -493,8 +483,7 @@ class TestSelfHeal(GlusterBaseClass): cmd_list = ["python %s create_files -f 20 %s", "python %s mv -i '.trashcan' %s", "python %s copy --dest-dir new_dir %s", - "python %s delete %s", - ] + "python %s delete %s"] for cmd in cmd_list: # Get areequal before getting bricks offline @@ -506,8 +495,7 @@ class TestSelfHeal(GlusterBaseClass): # Setting options g.log.info('Setting options...') - options = {"self-heal-daemon": "off", - } + options = {"self-heal-daemon": "off"} ret = set_volume_options(self.mnode, self.volname, options) self.assertTrue(ret, 'Failed to set options %s' % options) g.log.info("Option 'self-heal-daemon' " @@ -517,13 +505,13 @@ class TestSelfHeal(GlusterBaseClass): bricks_to_bring_offline_dict = (select_bricks_to_bring_offline( self.mnode, self.volname)) bricks_to_bring_offline = filter(None, ( - bricks_to_bring_offline_dict['hot_tier_bricks'] + - bricks_to_bring_offline_dict['cold_tier_bricks'] + - bricks_to_bring_offline_dict['volume_bricks'])) + bricks_to_bring_offline_dict['hot_tier_bricks'] + + 
bricks_to_bring_offline_dict['cold_tier_bricks'] + + bricks_to_bring_offline_dict['volume_bricks'])) # Bring brick offline - g.log.info('Bringing bricks %s offline...' - % bricks_to_bring_offline) + g.log.info('Bringing bricks %s offline...', + bricks_to_bring_offline) ret = bring_bricks_offline(self.volname, bricks_to_bring_offline) self.assertTrue(ret, 'Failed to bring bricks %s offline' % bricks_to_bring_offline) @@ -532,8 +520,8 @@ class TestSelfHeal(GlusterBaseClass): bricks_to_bring_offline) self.assertTrue(ret, 'Bricks %s are not offline' % bricks_to_bring_offline) - g.log.info('Bringing bricks %s offline is successful' - % bricks_to_bring_offline) + g.log.info('Bringing bricks %s offline is successful', + bricks_to_bring_offline) # Get areequal after getting bricks offline g.log.info('Getting areequal after getting bricks offline...') @@ -559,9 +547,8 @@ class TestSelfHeal(GlusterBaseClass): proc = g.run_async(mount_obj.client_system, cmd, user=mount_obj.user) self.all_mounts_procs.append(proc) - g.log.info("IO on %s:%s is modified successfully" - % (mount_obj.client_system, - mount_obj.mountpoint)) + g.log.info("IO on %s:%s is modified successfully", + mount_obj.client_system, mount_obj.mountpoint) self.io_validation_complete = False # Validate IO @@ -586,19 +573,18 @@ class TestSelfHeal(GlusterBaseClass): g.log.info("Listing all files and directories is successful") # Bring brick online - g.log.info('Bringing bricks %s online...' - % bricks_to_bring_offline) + g.log.info('Bringing bricks %s online...', + bricks_to_bring_offline) ret = bring_bricks_online(self.mnode, self.volname, bricks_to_bring_offline) self.assertTrue(ret, 'Failed to bring bricks %s online' % bricks_to_bring_offline) - g.log.info('Bringing bricks %s online is successful' - % bricks_to_bring_offline) + g.log.info('Bringing bricks %s online is successful', + bricks_to_bring_offline) # Setting options g.log.info('Setting options...') - options = {"self-heal-daemon": "on", - } + options = {"self-heal-daemon": "on"} ret = set_volume_options(self.mnode, self.volname, options) self.assertTrue(ret, 'Failed to set options %s' % options) g.log.info("Option 'self-heal-daemon' is set to 'on' successfully") @@ -618,7 +604,7 @@ class TestSelfHeal(GlusterBaseClass): self.volname) self.assertTrue(ret, ("Volume %s : All process are not online" % self.volname)) - g.log.info("Volume %s : All process are online" % self.volname) + g.log.info("Volume %s : All process are online", self.volname) # Wait for self-heal-daemons to be online g.log.info("Waiting for self-heal-daemons to be online") diff --git a/tests/functional/afr/heal/test_self_heal_daemon_process.py b/tests/functional/afr/heal/test_self_heal_daemon_process.py new file mode 100644 index 000000000..3412c1b49 --- /dev/null +++ b/tests/functional/afr/heal/test_self_heal_daemon_process.py @@ -0,0 +1,645 @@ +# Copyright (C) 2016-2017 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +""" Description: + Test Cases in this module tests the self heal daemon process. +""" + +from glusto.core import Glusto as g +from glustolibs.gluster.exceptions import ExecutionError +from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on +from glustolibs.gluster.volume_libs import ( + expand_volume, shrink_volume, log_volume_info_and_status, + wait_for_volume_process_to_be_online) +from glustolibs.gluster.rebalance_ops import (rebalance_start, + wait_for_rebalance_to_complete, + rebalance_status) +from glustolibs.gluster.brick_libs import (get_all_bricks, + bring_bricks_offline, + bring_bricks_online, + are_bricks_online, + select_bricks_to_bring_offline) +from glustolibs.gluster.brick_ops import replace_brick +from glustolibs.gluster.heal_libs import (get_self_heal_daemon_pid, + do_bricks_exist_in_shd_volfile, + is_shd_daemonized, + are_all_self_heal_daemons_are_online) +from glustolibs.gluster.volume_ops import (volume_stop, volume_start) +from glustolibs.gluster.gluster_init import restart_glusterd + + +@runs_on([['replicated', 'distributed-replicated', 'dispersed', + 'distributed-dispersed'], ['glusterfs', 'nfs', 'cifs']]) +class SelfHealDaemonProcessTests(GlusterBaseClass): + """ + SelfHealDaemonProcessTests contains tests which verifies the + self-heal daemon process of the nodes + """ + def setUp(self): + """ + setup volume, mount volume and initialize necessary variables + which is used in tests + """ + + # calling GlusterBaseClass setUpClass + GlusterBaseClass.setUp.im_func(self) + + # Setup Volume and Mount Volume + g.log.info("Starting to Setup Volume and Mount Volume") + ret = self.setup_volume_and_mount_volume(mounts=self.mounts) + if not ret: + raise ExecutionError("Failed to Setup_Volume and Mount_Volume") + g.log.info("Successful in Setup Volume and Mount Volume") + + # Verfiy glustershd process releases its parent process + ret = is_shd_daemonized(self.servers) + if not ret: + raise ExecutionError("Self Heal Daemon process was still" + " holding parent process.") + g.log.info("Self Heal Daemon processes are online") + + self.glustershd = "/var/lib/glusterd/glustershd/glustershd-server.vol" + + def tearDown(self): + """ + Clean up the volume and umount volume from client + """ + + # stopping the volume + g.log.info("Starting to Unmount Volume and Cleanup Volume") + ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts) + if not ret: + raise ExecutionError("Failed to Unmount Volume and Cleanup Volume") + g.log.info("Successful in Unmount Volume and Cleanup Volume") + + # calling GlusterBaseClass tearDownClass + GlusterBaseClass.tearDown.im_func(self) + + def test_glustershd_with_add_remove_brick(self): + """ + Test script to verify glustershd process with adding and + removing bricks + + * check glustershd process - only 1 glustershd process should + be running + * bricks must be present in glustershd-server.vol file for + the replicated involved volumes + * Add bricks + * check glustershd process - only 1 glustershd process should + be running and its should be different from previous one + * bricks which are added must present in glustershd-server.vol file + * remove bricks + * check glustershd process - only 1 glustershd process should + be running and its different from previous one + * bricks which are removed should not 
present + in glustershd-server.vol file + + """ + # pylint: disable=too-many-statements + nodes = self.volume['servers'] + bricks_list = [] + glustershd_pids = {} + + # check the self-heal daemon process + g.log.info("Starting to get self-heal daemon process on " + "nodes %s", nodes) + ret, pids = get_self_heal_daemon_pid(nodes) + self.assertTrue(ret, ("Either No self heal daemon process found or " + "more than One self heal daemon process " + "found : %s", pids)) + g.log.info("Successful in getting Single self heal daemon process" + " on all nodes %s", nodes) + glustershd_pids = pids + + # get the bricks for the volume + g.log.info("Fetching bricks for the volume : %s", self.volname) + bricks_list = get_all_bricks(self.mnode, self.volname) + g.log.info("Brick List : %s", bricks_list) + + # validate the bricks present in volume info with + # glustershd server volume file + g.log.info("Starting parsing file %s on " + "node %s", self.glustershd, self.mnode) + ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname, + bricks_list) + self.assertTrue(ret, ("Brick List from volume info is different " + "from glustershd server volume file. " + "Please check log file for details")) + g.log.info("Successfully parsed %s file", self.glustershd) + + # expanding volume + g.log.info("Start adding bricks to volume %s", self.volname) + ret = expand_volume(self.mnode, self.volname, self.servers, + self.all_servers_info) + self.assertTrue(ret, ("Failed to add bricks to " + "volume %s " % self.volname)) + g.log.info("Add brick successfull") + + # Log Volume Info and Status after expanding the volume + g.log.info("Logging volume info and Status after expanding volume") + ret = log_volume_info_and_status(self.mnode, self.volname) + self.assertTrue(ret, ("Logging volume info and status failed " + "on volume %s", self.volname)) + g.log.info("Successful in logging volume info and status " + "of volume %s", self.volname) + + # Verify volume's all process are online for 60 sec + g.log.info("Verifying volume's all process are online") + ret = wait_for_volume_process_to_be_online(self.mnode, self.volname, + 60) + self.assertTrue(ret, ("Volume %s : All process are not " + "online", self.volname)) + g.log.info("Successfully Verified volume %s processes are online", + self.volname) + + # Start Rebalance + g.log.info("Starting Rebalance on the volume") + ret, _, err = rebalance_start(self.mnode, self.volname) + self.assertEqual(ret, 0, ("Failed to start rebalance on " + "the volume %s with error %s" % + (self.volname, err))) + g.log.info("Successfully started rebalance on the " + "volume %s", self.volname) + + # Log Rebalance status + g.log.info("Log Rebalance status") + _, _, _ = rebalance_status(self.mnode, self.volname) + + # Wait for rebalance to complete + g.log.info("Waiting for rebalance to complete") + ret = wait_for_rebalance_to_complete(self.mnode, self.volname) + self.assertTrue(ret, ("Rebalance is not yet complete " + "on the volume %s", self.volname)) + g.log.info("Rebalance is successfully complete on " + "the volume %s", self.volname) + + # Check Rebalance status after rebalance is complete + g.log.info("Checking Rebalance status") + ret, _, _ = rebalance_status(self.mnode, self.volname) + self.assertEqual(ret, 0, ("Failed to get rebalance status for " + "the volume %s", self.volname)) + g.log.info("Successfully got rebalance status of the " + "volume %s", self.volname) + + # Check the self-heal daemon process after adding bricks + g.log.info("Starting to get self-heal daemon process on " + "nodes 
%s", nodes) + glustershd_pids_after_expanding = {} + ret, pids = get_self_heal_daemon_pid(nodes) + self.assertTrue(ret, ("Either No self heal daemon process found or " + "more than One self heal daemon process found")) + g.log.info("Successfull in getting self-heal daemon process " + "on nodes %s", nodes) + + glustershd_pids_after_expanding = pids + g.log.info("Self Heal Daemon Process ID's afetr expanding " + "volume: %s", glustershd_pids_after_expanding) + + self.assertNotEqual(glustershd_pids, + glustershd_pids_after_expanding, + "Self Daemon process is same before and" + " after adding bricks") + g.log.info("Self Heal Daemon Process is different before and " + "after adding bricks") + + # get the bricks for the volume after expanding + bricks_list_after_expanding = get_all_bricks(self.mnode, self.volname) + g.log.info("Brick List after expanding " + "volume: %s", bricks_list_after_expanding) + + # validate the bricks present in volume info + # with glustershd server volume file after adding bricks + g.log.info("Starting parsing file %s", self.glustershd) + ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname, + bricks_list_after_expanding) + + self.assertTrue(ret, ("Brick List from volume info is different " + "from glustershd server volume file after " + "expanding bricks. Please check log file " + "for details")) + g.log.info("Successfully parsed %s file", self.glustershd) + + # shrink the volume + g.log.info("Starting volume shrink") + ret = shrink_volume(self.mnode, self.volname) + self.assertTrue(ret, ("Failed to shrink the volume on " + "volume %s", self.volname)) + g.log.info("Shrinking volume is successful on " + "volume %s", self.volname) + + # Log Volume Info and Status after shrinking the volume + g.log.info("Logging volume info and Status after shrinking volume") + ret = log_volume_info_and_status(self.mnode, self.volname) + self.assertTrue(ret, ("Logging volume info and status failed on " + "volume %s", self.volname)) + g.log.info("Successful in logging volume info and status " + "of volume %s", self.volname) + + # get the bricks after shrinking the volume + bricks_list_after_shrinking = get_all_bricks(self.mnode, self.volname) + g.log.info("Brick List after shrinking " + "volume: %s", bricks_list_after_shrinking) + + self.assertEqual(len(bricks_list_after_shrinking), len(bricks_list), + "Brick Count is mismatched after " + "shrinking the volume %s" % self.volname) + g.log.info("Brick Count matched before before expanding " + "and after shrinking volume") + + # Verfiy glustershd process releases its parent process + ret = is_shd_daemonized(nodes) + self.assertTrue(ret, ("Either No self heal daemon process found or " + "more than One self heal daemon process found")) + + # check the self-heal daemon process after removing bricks + g.log.info("Starting to get self-heal daemon process " + "on nodes %s", nodes) + glustershd_pids_after_shrinking = {} + ret, pids = get_self_heal_daemon_pid(nodes) + glustershd_pids_after_shrinking = pids + self.assertNotEqual(glustershd_pids_after_expanding, + glustershd_pids_after_shrinking, + "Self Heal Daemon process is same " + "after adding bricks and shrinking volume") + g.log.info("Self Heal Daemon Process is different after adding bricks " + "and shrinking volume") + + # validate bricks present in volume info + # with glustershd server volume file after removing bricks + g.log.info("Starting parsing file %s", self.glustershd) + ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname, + bricks_list_after_shrinking) + 
self.assertTrue(ret, ("Brick List from volume info is different " + "from glustershd server volume file after " + "removing bricks. Please check log file " + "for details")) + g.log.info("Successfully parsed %s file", self.glustershd) + + def test_glustershd_with_restarting_glusterd(self): + """ + Test Script to verify the self heal daemon process with restarting + glusterd and rebooting the server + + * stop all volumes + * restart glusterd - should not run self heal daemon process + * start replicated involved volumes + * single self heal daemon process running + * restart glusterd + * self heal daemon pid will change + * bring down brick and restart glusterd + * self heal daemon pid will change and its different from previous + * brought up the brick + + """ + # pylint: disable=too-many-statements + nodes = self.volume['servers'] + + # stop the volume + g.log.info("Stopping the volume %s", self.volname) + ret = volume_stop(self.mnode, self.volname) + self.assertTrue(ret, ("Failed to stop volume %s" % self.volname)) + g.log.info("Successfully stopped volume %s", self.volname) + + # check the self heal daemon process after stopping the volume + g.log.info("Verifying the self heal daemon process for " + "volume %s", self.volname) + ret = are_all_self_heal_daemons_are_online(self.mnode, self.volname) + self.assertFalse(ret, ("Self Heal Daemon process is still running " + "even after stopping volume %s" % self.volname)) + g.log.info("Self Heal Daemon is not running after stopping " + "volume %s", self.volname) + + # restart glusterd service on all the servers + g.log.info("Restarting glusterd on all servers %s", nodes) + ret = restart_glusterd(nodes) + self.assertTrue(ret, ("Failed to restart glusterd on all nodes %s", + nodes)) + g.log.info("Successfully restarted glusterd on all nodes %s", + nodes) + + # check the self heal daemon process after restarting glusterd process + g.log.info("Starting to get self-heal daemon process on" + " nodes %s", nodes) + ret = are_all_self_heal_daemons_are_online(self.mnode, self.volname) + self.assertFalse(ret, ("Self Heal Daemon process is running after " + "glusterd restart with volume %s in " + "stop state" % self.volname)) + g.log.info("Self Heal Daemon is not running after stopping " + "volume and restarting glusterd %s", self.volname) + + # start the volume + g.log.info("Starting the volume %s", self.volname) + ret = volume_start(self.mnode, self.volname) + self.assertTrue(ret, ("Failed to start volume %s" % self.volname)) + g.log.info("Volume %s started successfully", self.volname) + + # Verfiy glustershd process releases its parent process + g.log.info("Checking whether glustershd process is daemonized or not") + ret = is_shd_daemonized(nodes) + self.assertTrue(ret, ("Either No self heal daemon process found or " + "more than One self heal daemon process found")) + g.log.info("Single self heal daemon process on all nodes %s", nodes) + + # get the self heal daemon pids after starting volume + g.log.info("Starting to get self-heal daemon process " + "on nodes %s", nodes) + ret, pids = get_self_heal_daemon_pid(nodes) + self.assertTrue(ret, ("Either No self heal daemon process found or " + "more than One self heal daemon process found")) + g.log.info("Succesfull in getting self heal daemon pids") + glustershd_pids = pids + + # get the bricks for the volume + g.log.info("Fetching bricks for the volume : %s", self.volname) + bricks_list = get_all_bricks(self.mnode, self.volname) + g.log.info("Brick List : %s", bricks_list) + + # validate the bricks 
present in volume info + # with glustershd server volume file + g.log.info("Starting parsing file %s on " + "node %s", self.glustershd, self.mnode) + ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname, + bricks_list) + self.assertTrue(ret, ("Brick List from volume info is different from " + "glustershd server volume file. " + "Please check log file for details.")) + g.log.info("Successfully parsed %s file", self.glustershd) + + # restart glusterd service on all the servers + g.log.info("Restarting glusterd on all servers %s", nodes) + ret = restart_glusterd(nodes) + self.assertTrue(ret, ("Failed to restart glusterd on all nodes %s", + nodes)) + g.log.info("Successfully restarted glusterd on all nodes %s", + nodes) + + # Verify volume's all process are online for 60 sec + g.log.info("Verifying volume's all process are online") + ret = wait_for_volume_process_to_be_online(self.mnode, self.volname, + 60) + self.assertTrue(ret, ("Volume %s : All process are not " + "online", self.volname)) + g.log.info("Successfully Verified volume %s processes are online", + self.volname) + + # Verfiy glustershd process releases its parent process + ret = is_shd_daemonized(nodes) + self.assertTrue(ret, ("Either No self heal daemon process found or " + "more than One self heal daemon process found")) + + # check the self heal daemon process after starting volume and + # restarting glusterd process + g.log.info("Starting to get self-heal daemon process " + "on nodes %s", nodes) + ret, pids = get_self_heal_daemon_pid(nodes) + self.assertTrue(ret, ("Either No self heal daemon process found or " + "more than One self heal daemon process found")) + glustershd_pids_after_glusterd_restart = pids + + self.assertNotEqual(glustershd_pids, + glustershd_pids_after_glusterd_restart, + ("Self Heal Daemon pids are same after " + "restarting glusterd process")) + g.log.info("Self Heal Daemon process are different before and " + "after restarting glusterd process") + + # select bricks to bring offline + bricks_to_bring_offline_dict = (select_bricks_to_bring_offline( + self.mnode, self.volname)) + bricks_to_bring_offline = filter(None, ( + bricks_to_bring_offline_dict['hot_tier_bricks'] + + bricks_to_bring_offline_dict['cold_tier_bricks'] + + bricks_to_bring_offline_dict['volume_bricks'])) + + # bring bricks offline + g.log.info("Going to bring down the brick process " + "for %s", bricks_to_bring_offline) + ret = bring_bricks_offline(self.volname, bricks_to_bring_offline) + self.assertTrue(ret, ("Failed to bring down the bricks. 
Please " + "check the log file for more details.")) + g.log.info("Brought down the brick process " + "for %s succesfully", bricks_to_bring_offline) + + # restart glusterd after brought down the brick + g.log.info("Restart glusterd on all servers %s", nodes) + ret = restart_glusterd(nodes) + self.assertTrue(ret, ("Failed to restart glusterd on all nodes %s", + nodes)) + g.log.info("Successfully restarted glusterd on all nodes %s", + nodes) + + # Verify volume's all process are online for 60 sec + g.log.info("Verifying volume's all process are online") + ret = wait_for_volume_process_to_be_online(self.mnode, self.volname, + 60) + self.assertTrue(ret, ("Volume %s : All process are not " + "online", self.volname)) + g.log.info("Successfully Verified volume %s processes are online", + self.volname) + + # Verfiy glustershd process releases its parent process + ret = is_shd_daemonized(nodes) + self.assertTrue(ret, ("Either No self heal daemon process found or " + "more than One self heal daemon process found")) + + # check the self heal daemon process after killing brick and + # restarting glusterd process + g.log.info("Starting to get self-heal daemon process " + "on nodes %s", nodes) + ret, pids = get_self_heal_daemon_pid(nodes) + self.assertTrue(ret, ("Either No self heal daemon process found or " + "more than One self heal daemon process found")) + glustershd_pids_after_killing_brick = pids + + self.assertNotEqual(glustershd_pids_after_glusterd_restart, + glustershd_pids_after_killing_brick, + ("Self Heal Daemon process are same from before " + "killing the brick,restarting glusterd process")) + g.log.info("Self Heal Daemon process are different after killing the " + "brick, restarting the glusterd process") + + # brought the brick online + g.log.info("bringing up the bricks : %s online", + bricks_to_bring_offline) + ret = bring_bricks_online(self.mnode, self.volname, + bricks_to_bring_offline) + self.assertTrue(ret, ("Failed to brought the bricks online")) + g.log.info("Successfully brought the bricks online") + + # check all bricks are online + g.log.info("Verifying all bricka are online or not.....") + ret = are_bricks_online(self.mnode, self.volname, + bricks_to_bring_offline) + self.assertTrue(ret, ("Not all bricks are online")) + g.log.info("All bricks are online.") + + +@runs_on([['replicated', 'distributed-replicated'], + ['glusterfs', 'nfs', 'cifs']]) +class ImpactOfReplaceBrickForGlustershdTests(GlusterBaseClass): + """ + ClientSideQuorumTests contains tests which verifies the + client side quorum Test Cases + """ + + @classmethod + def setUpClass(cls): + # Calling GlusterBaseClass setUpClass + GlusterBaseClass.setUpClass.im_func(cls) + + # Override Volumes + if cls.volume_type == "distributed-replicated": + # Define distributed-replicated volume + cls.volume['voltype'] = { + 'type': 'distributed-replicated', + 'dist_count': 2, + 'replica_count': 3, + 'arbiter_count': 1, + 'transport': 'tcp'} + + cls.glustershd = "/var/lib/glusterd/glustershd/glustershd-server.vol" + + def setUp(self): + """ + setUp method for every test + """ + + # calling GlusterBaseClass setUp + GlusterBaseClass.setUp.im_func(self) + + self.all_mounts_procs = [] + self.io_validation_complete = False + + # Setup Volume and Mount Volume + g.log.info("Starting to Setup Volume %s", self.volname) + ret = self.setup_volume_and_mount_volume(self.mounts, + volume_create_force=False) + if not ret: + raise ExecutionError("Failed to Setup_Volume and Mount_Volume") + g.log.info("Successful in Setup Volume and Mount Volume") 
+ + def tearDown(self): + """ + If test method failed before validating IO, tearDown waits for the + IO's to complete and checks for the IO exit status + + Cleanup and umount volume + """ + # Cleanup and umount volume + g.log.info("Starting to Unmount Volume and Cleanup Volume") + ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts) + if not ret: + raise ExecutionError("Failed to umount the vol & cleanup Volume") + g.log.info("Successful in umounting the volume and Cleanup") + + # Calling GlusterBaseClass teardown + GlusterBaseClass.tearDown.im_func(self) + + def test_impact_of_replace_brick_for_glustershd(self): + nodes = self.volume['servers'] + + # check the self-heal daemon process + g.log.info("Starting to get self-heal daemon process on " + "nodes %s", nodes) + ret, pids = get_self_heal_daemon_pid(nodes) + self.assertTrue(ret, ("Either No self heal daemon process found or " + "more than One self heal daemon process " + "found : %s" % pids)) + g.log.info("Successful in getting Single self heal daemon process" + " on all nodes %s", nodes) + glustershd_pids = pids + + # get the bricks for the volume + g.log.info("Fetching bricks for the volume : %s", self.volname) + bricks_list = get_all_bricks(self.mnode, self.volname) + g.log.info("Brick List : %s", bricks_list) + + # validate the bricks present in volume info with + # glustershd server volume file + g.log.info("Starting parsing file %s on " + "node %s", self.glustershd, self.mnode) + ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname, + bricks_list) + self.assertTrue(ret, ("Brick List from volume info is different " + "from glustershd server volume file. " + "Please check log file for details")) + g.log.info("Successfully parsed %s file", self.glustershd) + + # replace brick + brick_to_replace = bricks_list[-1] + new_brick = brick_to_replace + 'new' + g.log.info("Replacing the brick %s for the volume : %s", + brick_to_replace, self.volname) + ret, _, err = replace_brick(self.mnode, self.volname, + brick_to_replace, new_brick) + self.assertFalse(ret, err) + g.log.info('Replaced brick %s to %s successfully', + brick_to_replace, new_brick) + + # check bricks + bricks_list = get_all_bricks(self.mnode, self.volname) + self.assertEqual(bricks_list[-1], new_brick, 'Replaced brick and ' + 'new brick are not equal') + + # Verify volume's all process are online for 60 sec + g.log.info("Verifying volume's all process are online") + ret = wait_for_volume_process_to_be_online(self.mnode, self.volname, + timeout=60) + self.assertTrue(ret, ("Volume %s : All process are not " + "online", self.volname)) + g.log.info("Successfully Verified volume %s processes are online", + self.volname) + + # Verify glustershd process releases its parent process + ret = is_shd_daemonized(nodes) + self.assertTrue(ret, ("Either No self heal daemon process found or " + "more than One self heal daemon process found")) + + # check the self-heal daemon process + g.log.info("Starting to get self-heal daemon process on " + "nodes %s", nodes) + ret, pids = get_self_heal_daemon_pid(nodes) + self.assertTrue(ret, ("Either No self heal daemon process found or " + "more than One self heal daemon process " + "found : %s" % pids)) + g.log.info("Successful in getting Single self heal daemon process" + " on all nodes %s", nodes) + glustershd_pids_after_replacement = pids + + # Compare pids before and after replacing + self.assertNotEqual(glustershd_pids, + glustershd_pids_after_replacement, + "Self Daemon process is same before and" + " after replacing bricks") 
+ g.log.info("Self Heal Daemon Process is different before and " + "after replacing bricks") + + # get the bricks for the volume after replacing + bricks_list_after_replacing = get_all_bricks(self.mnode, self.volname) + g.log.info("Brick List after expanding " + "volume: %s", bricks_list_after_replacing) + + # validate the bricks present in volume info + # with glustershd server volume file after replacing bricks + g.log.info("Starting parsing file %s", self.glustershd) + ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname, + bricks_list_after_replacing) + + self.assertTrue(ret, ("Brick List from volume info is different " + "from glustershd server volume file after " + "replacing bricks. Please check log file " + "for details")) + g.log.info("Successfully parsed %s file", self.glustershd) diff --git a/tests/functional/afr/test_client_side_quorum.py b/tests/functional/afr/test_client_side_quorum.py index 2512faee3..ba0aaa772 100644 --- a/tests/functional/afr/test_client_side_quorum.py +++ b/tests/functional/afr/test_client_side_quorum.py @@ -18,6 +18,7 @@ Test Cases in this module tests the client side quorum. """ +import tempfile from glusto.core import Glusto as g from glustolibs.gluster.exceptions import ExecutionError @@ -33,10 +34,8 @@ from glustolibs.gluster.brick_libs import (bring_bricks_offline, from glustolibs.io.utils import (validate_io_procs, is_io_procs_fail_with_rofs, list_all_files_and_dirs_mounts, - wait_for_io_to_complete - ) + wait_for_io_to_complete) from glustolibs.gluster.mount_ops import mount_volume, umount_volume -import tempfile @runs_on([['replicated', 'distributed-replicated'], @@ -74,7 +73,7 @@ class ClientSideQuorumTests(GlusterBaseClass): GlusterBaseClass.setUp.im_func(self) # Setup Volume and Mount Volume - g.log.info("Starting to Setup Volume %s" % self.volname) + g.log.info("Starting to Setup Volume %s", self.volname) ret = self.setup_volume_and_mount_volume(self.mounts) if not ret: raise ExecutionError("Failed to Setup_Volume and Mount_Volume") @@ -93,6 +92,7 @@ class ClientSideQuorumTests(GlusterBaseClass): g.log.info("Successful in Unmount Volume and Cleanup Volume") # Calling GlusterBaseClass tearDown + GlusterBaseClass.tearDown.im_func(self) def test_client_side_quorum_with_auto_option(self): @@ -105,19 +105,19 @@ class ClientSideQuorumTests(GlusterBaseClass): * perform ops """ + # pylint: disable=too-many-branches,too-many-statements # set cluster.quorum-type to auto options = {"cluster.quorum-type": "auto"} g.log.info("setting cluster.quorum-type to auto on " - "volume %s" % self.volname) + "volume %s", self.volname) ret = set_volume_options(self.mnode, self.volname, options) self.assertTrue(ret, ("Unable to set volume option %s for" "volume %s" % (options, self.volname))) - g.log.info("Sucessfully set %s for volume %s" - % (options, self.volname)) + g.log.info("Sucessfully set %s for volume %s", options, self.volname) # write files on all mounts g.log.info("Starting IO on all mounts...") - g.log.info("mounts: %s" % self.mounts) + g.log.info("mounts: %s", self.mounts) all_mounts_procs = [] for mount_obj in self.mounts: cmd = ("python %s create_files " @@ -134,28 +134,27 @@ class ClientSideQuorumTests(GlusterBaseClass): g.log.info("IO is successful on all mounts") # get the subvolumes - g.log.info("Starting to get sub-volumes for volume %s" % self.volname) + g.log.info("Starting to get sub-volumes for volume %s", self.volname) subvols_dict = get_subvols(self.mnode, self.volname) num_subvols = len(subvols_dict['volume_subvols']) - 
g.log.info("Number of subvolumes in volume %s:" % num_subvols) + g.log.info("Number of subvolumes in volume %s:", num_subvols) # bring bricks offline( 2 bricks ) for all the subvolumes for i in range(0, num_subvols): subvol_brick_list = subvols_dict['volume_subvols'][i] - g.log.info("sub-volume %s brick list : %s" - % (i, subvol_brick_list)) + g.log.info("sub-volume %s brick list : %s", i, subvol_brick_list) # For volume type: 1 * 2, bring 1 brick offline if len(subvol_brick_list) == 2: bricks_to_bring_offline = subvol_brick_list[0:1] else: bricks_to_bring_offline = subvol_brick_list[0:2] g.log.info("Going to bring down the brick process " - "for %s" % bricks_to_bring_offline) + "for %s", bricks_to_bring_offline) ret = bring_bricks_offline(self.volname, bricks_to_bring_offline) self.assertTrue(ret, ("Failed to bring down the bricks. Please " "check the log file for more details.")) g.log.info("Brought down the brick process " - "for %s succesfully" % bricks_to_bring_offline) + "for %s succesfully", bricks_to_bring_offline) # create 2 files named newfile0.txt and newfile1.txt g.log.info("Start creating 2 files on all mounts...") @@ -198,7 +197,7 @@ class ClientSideQuorumTests(GlusterBaseClass): for mount_obj in self.mounts: cmd = "ln %s/file0.txt %s/file0.txt_hwlink" \ % (mount_obj.mountpoint, mount_obj.mountpoint) - ret, out, err = g.run(mount_obj.client_system, cmd) + ret, _, err = g.run(mount_obj.client_system, cmd) self.assertTrue(ret, ("Unexpected error and creating hard link" " successful on read-only filesystem")) self.assertIn("Read-only file system", @@ -211,7 +210,7 @@ class ClientSideQuorumTests(GlusterBaseClass): for mount_obj in self.mounts: cmd = "ln -s %s/file1.txt %s/file1.txt_swlink" %\ (mount_obj.mountpoint, mount_obj.mountpoint) - ret, out, err = g.run(mount_obj.client_system, cmd) + ret, _, err = g.run(mount_obj.client_system, cmd) self.assertTrue(ret, ("Unexpected error and creating soft link" " successful on read-only filesystem")) self.assertIn("Read-only file system", @@ -224,7 +223,7 @@ class ClientSideQuorumTests(GlusterBaseClass): for mount_obj in self.mounts: cmd = "cat %s/file0.txt >> %s/file1.txt" %\ (mount_obj.mountpoint, mount_obj.mountpoint) - ret, out, err = g.run(mount_obj.client_system, cmd) + ret, _, err = g.run(mount_obj.client_system, cmd) self.assertTrue(ret, ("Unexpected error and append successful" " on read-only filesystem")) self.assertIn("Read-only file system", @@ -237,7 +236,7 @@ class ClientSideQuorumTests(GlusterBaseClass): for mount_obj in self.mounts: cmd = "echo 'Modify Contents' > %s/file1.txt"\ % (mount_obj.mountpoint) - ret, out, err = g.run(mount_obj.client_system, cmd) + ret, _, err = g.run(mount_obj.client_system, cmd) self.assertTrue(ret, ("Unexpected error and modifying successful" " on read-only filesystem")) self.assertIn("Read-only file system", @@ -249,7 +248,7 @@ class ClientSideQuorumTests(GlusterBaseClass): g.log.info("Truncating file1.txt on all mounts") for mount_obj in self.mounts: cmd = "truncate -s 0 %s/file1.txt" % (mount_obj.mountpoint) - ret, out, err = g.run(mount_obj.client_system, cmd) + ret, _, err = g.run(mount_obj.client_system, cmd) self.assertTrue(ret, ("Unexpected error and truncating file" " successful on read-only filesystem")) self.assertIn("Read-only file system", @@ -277,7 +276,7 @@ class ClientSideQuorumTests(GlusterBaseClass): g.log.info("stat on file1.txt on all mounts") for mount_obj in self.mounts: cmd = "stat %s/file1.txt" % (mount_obj.mountpoint) - ret, out, err = g.run(mount_obj.client_system, 
cmd) + ret, _, err = g.run(mount_obj.client_system, cmd) self.assertFalse(ret, ("Unexpected error and stat on file fails" " on read-only filesystem")) g.log.info("stat on file is successfull on read-only filesystem") @@ -287,7 +286,7 @@ class ClientSideQuorumTests(GlusterBaseClass): for mount_obj in self.mounts: cmd = ("python %s stat %s" % (self.script_upload_path, mount_obj.mountpoint)) - ret, out, err = g.run(mount_obj.client_system, cmd) + ret, _, err = g.run(mount_obj.client_system, cmd) self.assertFalse(ret, ("Unexpected error and stat on directory" " fails on read-only filesystem")) g.log.info("stat on dir is successfull on read-only filesystem") @@ -297,7 +296,7 @@ class ClientSideQuorumTests(GlusterBaseClass): for mount_obj in self.mounts: cmd = ("python %s ls %s" % (self.script_upload_path, mount_obj.mountpoint)) - ret, out, err = g.run(mount_obj.client_system, cmd) + ret, _, err = g.run(mount_obj.client_system, cmd) self.assertFalse(ret, ("Unexpected error and listing file fails" " on read-only filesystem")) g.log.info("listing files is successfull on read-only filesystem") @@ -316,33 +315,31 @@ class ClientSideQuorumTests(GlusterBaseClass): # set cluster.quorum-type to fixed options = {"cluster.quorum-type": "fixed"} - g.log.info("setting %s for the volume %s" % (options, self.volname)) + g.log.info("setting %s for the volume %s", options, self.volname) ret = set_volume_options(self.mnode, self.volname, options) self.assertTrue(ret, ("Unable to set %s for volume %s" % (options, self.volname))) - g.log.info("Successfully set %s for volume %s" - % (options, self.volname)) + g.log.info("Successfully set %s for volume %s", options, self.volname) # get the subvolumes - g.log.info("Starting to get sub-volumes for volume %s" % self.volname) + g.log.info("Starting to get sub-volumes for volume %s", self.volname) subvols_dict = get_subvols(self.mnode, self.volname) num_subvols = len(subvols_dict['volume_subvols']) - g.log.info("Number of subvolumes in volume %s is %s" - % (self.volname, num_subvols)) + g.log.info("Number of subvolumes in volume %s is %s", self.volname, + num_subvols) # get the number of bricks in replica set num_bricks_in_subvol = len(subvols_dict['volume_subvols'][0]) - g.log.info("Number of bricks in each replica set : %s" - % num_bricks_in_subvol) + g.log.info("Number of bricks in each replica set : %s", + num_bricks_in_subvol) # set cluster.quorum-count to higher value than the number of bricks in # repliac set start_range = num_bricks_in_subvol + 1 end_range = num_bricks_in_subvol + 30 for i in range(start_range, end_range): - options = {"cluster.quorum-count": "%s" % start_range} - g.log.info("setting %s for the volume %s" % - (options, self.volname)) + options = {"cluster.quorum-count": "%s" % i} + g.log.info("setting %s for the volume %s", options, self.volname) ret = set_volume_options(self.mnode, self.volname, options) self.assertFalse(ret, ("Able to set %s for volume %s, quorum-count" " should not be greater than number of" @@ -350,7 +347,7 @@ class ClientSideQuorumTests(GlusterBaseClass): % (options, self.volname))) g.log.info("Expected: Unable to set %s for volume %s, " "quorum-count should be less than number of bricks " - "in replica set" % (options, self.volname)) + "in replica set", options, self.volname) @runs_on([['distributed-replicated'], @@ -363,8 +360,8 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): GlusterBaseClass.setUpClass.im_func(cls) # Upload io scripts for running IO on mounts - g.log.info("Upload io scripts to clients %s for 
running IO on mounts" - % cls.clients) + g.log.info("Upload io scripts to clients %s for running IO on mounts", + cls.clients) script_local_path = ("/usr/share/glustolibs/io/scripts/" "file_dir_ops.py") cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/" @@ -373,23 +370,22 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): if not ret: raise ExecutionError("Failed to upload IO scripts to clients %s" % cls.clients) - g.log.info("Successfully uploaded IO scripts to clients %s" - % cls.clients) + g.log.info("Successfully uploaded IO scripts to clients %s", + cls.clients) cls.counter = 1 - """int: Value of counter is used for dirname-start-num argument for - file_dir_ops.py create_deep_dirs_with_files. - - The --dir-length argument value for - file_dir_ops.py create_deep_dirs_with_files is set to 10 - (refer to the cmd in setUp method). This means every mount will create - 10 top level dirs. For every mountpoint/testcase to create new set of - dirs, we are incrementing the counter by --dir-length value i.e 10 - in this test suite. - - If we are changing the --dir-length to new value, ensure the counter - is also incremented by same value to create new set of files/dirs. - """ + # int: Value of counter is used for dirname-start-num argument for + # file_dir_ops.py create_deep_dirs_with_files. + + # The --dir-length argument value for file_dir_ops.py + # create_deep_dirs_with_files is set to 10 (refer to the cmd in setUp + # method). This means every mount will create + # 10 top level dirs. For every mountpoint/testcase to create new set of + # dirs, we are incrementing the counter by --dir-length value i.e 10 in + # this test suite. + + # If we are changing the --dir-length to new value, ensure the counter + # is also incremented by same value to create new set of files/dirs. 
# Setup Volumes if cls.volume_type == "distributed-replicated": @@ -407,8 +403,7 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): {'name': 'testvol_%s_%d' % (cls.volume['voltype']['type'], i), 'servers': cls.servers, - 'voltype': cls.volume['voltype'] - }) + 'voltype': cls.volume['voltype']}) # Define two 2x3 distributed-replicated volumes for i in range(1, 3): @@ -422,8 +417,7 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): {'name': 'testvol_%s_%d' % (cls.volume['voltype']['type'], i+2), 'servers': cls.servers, - 'voltype': cls.volume['voltype'] - }) + 'voltype': cls.volume['voltype']}) # Define distributed volume cls.volume['voltype'] = { @@ -435,8 +429,7 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): {'name': 'testvol_%s' % cls.volume['voltype']['type'], 'servers': cls.servers, - 'voltype': cls.volume['voltype'] - }) + 'voltype': cls.volume['voltype']}) # Create and mount volumes cls.mount_points = [] @@ -450,7 +443,7 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): force=False) if not ret: raise ExecutionError("Failed to setup Volume" - " %s", volume_config['name']) + " %s" % volume_config['name']) g.log.info("Successful in setting volume %s", volume_config['name']) @@ -468,8 +461,8 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): raise ExecutionError( "Failed to do gluster mount on volume %s " % cls.volname) - g.log.info("Successfully mounted %s on client %s" - % (cls.volname, cls.client)) + g.log.info("Successfully mounted %s on client %s", + cls.volname, cls.client) def setUp(self): # Calling GlusterBaseClass setUp @@ -515,7 +508,7 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): ret = cleanup_volume(cls.mnode, volume) if not ret: raise ExecutionError("Failed to cleanup Volume %s" % volume) - g.log.info("Volume: %s cleanup is done" % volume) + g.log.info("Volume: %s cleanup is done", volume) g.log.info("Successfully Cleanedup all Volumes") # umount all volumes @@ -525,8 +518,8 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): raise ExecutionError( "Failed to umount on volume %s " % cls.volname) - g.log.info("Successfully umounted %s on client %s" - % (cls.volname, cls.client)) + g.log.info("Successfully umounted %s on client %s", cls.volname, + cls.client) # calling GlusterBaseClass tearDownClass GlusterBaseClass.tearDownClass.im_func(cls) @@ -545,12 +538,13 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): - bring down b0 on vol1 and b0 and b1 on vol3 - try to create files on all vols and check for result """ + # pylint: disable=too-many-locals,too-many-statements # Creating files for all volumes for mount_point in self.mount_points: self.all_mounts_procs = [] for mount_obj in self.mounts: - g.log.info("Generating data for %s:%s" - % (mount_obj.client_system, mount_point)) + g.log.info("Generating data for %s:%s", + mount_obj.client_system, mount_point) # Create files g.log.info('Creating files...') command = ("python %s create_files -f 50 " @@ -576,19 +570,17 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): % vol_number) options = {"cluster.quorum-type": "auto"} g.log.info("setting cluster.quorum-type to auto on " - "volume testvol_distributed-replicated_%s" - % vol_number) + "volume testvol_distributed-replicated_%s", vol_number) ret = set_volume_options(self.mnode, vol_name, options) self.assertTrue(ret, ("Unable to set volume option %s for " "volume %s" % (options, vol_name))) - g.log.info("Sucessfully set %s for volume %s" - % (options, vol_name)) + 
g.log.info("Sucessfully set %s for volume %s", options, vol_name) # check is options are set correctly volume_list = get_volume_list(self.mnode) for volume in volume_list: - g.log.info('Checking for cluster.quorum-type option for %s' - % volume) + g.log.info('Checking for cluster.quorum-type option for %s', + volume) volume_options_dict = get_volume_options(self.mnode, volume, 'cluster.quorum-type') @@ -599,16 +591,14 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): 'Option cluster.quorum-type ' 'is not AUTO for %s' % volume) - g.log.info('Option cluster.quorum-type is AUTO for %s' - % volume) + g.log.info('Option cluster.quorum-type is AUTO for %s', volume) else: self.assertEqual(volume_options_dict['cluster.quorum-type'], 'none', 'Option cluster.quorum-type ' 'is not NONE for %s' % volume) - g.log.info('Option cluster.quorum-type is NONE for %s' - % volume) + g.log.info('Option cluster.quorum-type is NONE for %s', volume) # Get first brick server and brick path # and get first file from filelist then delete it from volume @@ -616,11 +606,11 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): for volume in volume_list: brick_list = get_all_bricks(self.mnode, volume) brick_server, brick_path = brick_list[0].split(':') - ret, file_list, err = g.run(brick_server, 'ls %s' % brick_path) + ret, file_list, _ = g.run(brick_server, 'ls %s' % brick_path) self.assertFalse(ret, 'Failed to ls files on %s' % brick_server) file_from_vol = file_list.splitlines()[0] - ret, out, err = g.run(brick_server, 'rm -rf %s/%s' - % (brick_path, file_from_vol)) + ret, _, _ = g.run(brick_server, 'rm -rf %s/%s' + % (brick_path, file_from_vol)) self.assertFalse(ret, 'Failed to rm file on %s' % brick_server) vols_file_list[volume] = file_from_vol @@ -629,7 +619,7 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): volname = 'testvol_distributed-replicated_1' brick_list = get_all_bricks(self.mnode, volname) bricks_to_bring_offline = brick_list[0:1] - g.log.info('Bringing bricks %s offline...' % bricks_to_bring_offline) + g.log.info('Bringing bricks %s offline...', bricks_to_bring_offline) ret = bring_bricks_offline(volname, bricks_to_bring_offline) self.assertTrue(ret, 'Failed to bring bricks %s offline' % bricks_to_bring_offline) @@ -638,14 +628,14 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): bricks_to_bring_offline) self.assertTrue(ret, 'Bricks %s are not offline' % bricks_to_bring_offline) - g.log.info('Bringing bricks %s offline is successful' - % bricks_to_bring_offline) + g.log.info('Bringing bricks %s offline is successful', + bricks_to_bring_offline) # bring first two bricks for testvol_distributed-replicated_3 volname = 'testvol_distributed-replicated_3' brick_list = get_all_bricks(self.mnode, volname) bricks_to_bring_offline = brick_list[0:2] - g.log.info('Bringing bricks %s offline...' 
% bricks_to_bring_offline) + g.log.info('Bringing bricks %s offline...', bricks_to_bring_offline) ret = bring_bricks_offline(volname, bricks_to_bring_offline) self.assertTrue(ret, 'Failed to bring bricks %s offline' % bricks_to_bring_offline) @@ -654,8 +644,8 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): bricks_to_bring_offline) self.assertTrue(ret, 'Bricks %s are not offline' % bricks_to_bring_offline) - g.log.info('Bringing bricks %s offline is successful' - % bricks_to_bring_offline) + g.log.info('Bringing bricks %s offline is successful', + bricks_to_bring_offline) # merge two dicts (volname: file_to_delete) and (volname: mountpoint) temp_dict = [vols_file_list, self.mount_points_and_volnames] diff --git a/tests/functional/afr/test_heal_info_while_accessing_file.py b/tests/functional/afr/test_heal_info_while_accessing_file.py deleted file mode 100644 index 316880318..000000000 --- a/tests/functional/afr/test_heal_info_while_accessing_file.py +++ /dev/null @@ -1,230 +0,0 @@ -# Copyright (C) 2015-2016 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -from glusto.core import Glusto as g -from glustolibs.gluster.gluster_base_class import (GlusterBaseClass, runs_on) -from glustolibs.gluster.exceptions import ExecutionError -from glustolibs.gluster.volume_libs import get_subvols -from glustolibs.gluster.brick_libs import (bring_bricks_offline, - are_bricks_offline, - get_all_bricks) - -from glustolibs.gluster.heal_ops import get_heal_info_summary -from glustolibs.misc.misc_libs import upload_scripts -from glustolibs.io.utils import (validate_io_procs, - list_all_files_and_dirs_mounts, - wait_for_io_to_complete) - - -@runs_on([['distributed-replicated'], - ['glusterfs']]) -class TestSelfHeal(GlusterBaseClass): - """ - Description: - Test cases related to - healing in default configuration of the volume - """ - - @classmethod - def setUpClass(cls): - # Calling GlusterBaseClass setUpClass - GlusterBaseClass.setUpClass.im_func(cls) - - # Upload io scripts for running IO on mounts - g.log.info("Upload io scripts to clients %s for running IO on mounts" - % cls.clients) - script_local_path = ("/usr/share/glustolibs/io/scripts/" - "file_dir_ops.py") - cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/" - "file_dir_ops.py") - ret = upload_scripts(cls.clients, [script_local_path]) - if not ret: - raise ExecutionError("Failed to upload IO scripts to clients %s" - % cls.clients) - g.log.info("Successfully uploaded IO scripts to clients %s" - % cls.clients) - - cls.counter = 1 - """int: Value of counter is used for dirname-start-num argument for - file_dir_ops.py create_deep_dirs_with_files. - - The --dir-length argument value for - file_dir_ops.py create_deep_dirs_with_files is set to 10 - (refer to the cmd in setUp method). This means every mount will create - 10 top level dirs. 
For every mountpoint/testcase to create new set of - dirs, we are incrementing the counter by --dir-length value i.e 10 - in this test suite. - - If we are changing the --dir-length to new value, ensure the counter - is also incremented by same value to create new set of files/dirs. - """ - - def setUp(self): - # Calling GlusterBaseClass setUp - GlusterBaseClass.setUp.im_func(self) - - self.all_mounts_procs = [] - self.io_validation_complete = False - - # Setup Volume and Mount Volume - g.log.info("Starting to Setup Volume and Mount Volume") - ret = self.setup_volume_and_mount_volume(mounts=self.mounts, - volume_create_force=False) - if not ret: - raise ExecutionError("Failed to Setup_Volume and Mount_Volume") - g.log.info("Successful in Setup Volume and Mount Volume") - - self.bricks_list = get_all_bricks(self.mnode, self.volname) - - def tearDown(self): - """ - If test method failed before validating IO, tearDown waits for the - IO's to complete and checks for the IO exit status - - Cleanup and umount volume - """ - if not self.io_validation_complete: - g.log.info("Wait for IO to complete as IO validation did not " - "succeed in test method") - ret = wait_for_io_to_complete(self.all_mounts_procs, self.mounts) - if not ret: - raise ExecutionError("IO failed on some of the clients") - g.log.info("IO is successful on all mounts") - - # List all files and dirs created - g.log.info("List all files and directories:") - ret = list_all_files_and_dirs_mounts(self.mounts) - if not ret: - raise ExecutionError("Failed to list all files and dirs") - g.log.info("Listing all files and directories is successful") - - # Cleanup and umount volume - g.log.info("Starting to Unmount Volume and Cleanup Volume") - ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts) - if not ret: - raise ExecutionError("Failed to umount the vol & cleanup Volume") - g.log.info("Successful in umounting the volume and Cleanup") - - # Calling GlusterBaseClass teardown - GlusterBaseClass.tearDown.im_func(self) - - def test_heal_info_shouldnot_list_files_being_accessed(self): - """ - - bring brick 1 offline - - create files and validate IO - - get entries before accessing file - - get first filename from active subvol without offline bricks - - access and modify the file - - while accessing - get entries - - Compare entries before accessing and while accessing - - validate IO - """ - - # Bring 1-st brick offline - brick_to_bring_offline = [self.bricks_list[0]] - g.log.info('Bringing bricks %s offline...' 
% brick_to_bring_offline) - ret = bring_bricks_offline(self.volname, brick_to_bring_offline) - self.assertTrue(ret, 'Failed to bring bricks %s offline' - % brick_to_bring_offline) - - ret = are_bricks_offline(self.mnode, self.volname, - brick_to_bring_offline) - self.assertTrue(ret, 'Bricks %s are not offline' - % brick_to_bring_offline) - g.log.info('Bringing bricks %s offline is successful' - % brick_to_bring_offline) - - # Creating files on client side - for mount_obj in self.mounts: - g.log.info("Generating data for %s:%s" - % (mount_obj.client_system, mount_obj.mountpoint)) - - # Creating files - cmd = ("python %s create_files -f 100 %s" - % (self.script_upload_path, mount_obj.mountpoint)) - - proc = g.run_async(mount_obj.client_system, cmd, - user=mount_obj.user) - self.all_mounts_procs.append(proc) - - # Validate IO - g.log.info("Wait for IO to complete and validate IO ...") - ret = validate_io_procs(self.all_mounts_procs, self.mounts) - self.io_validation_complete = True - self.assertTrue(ret, "IO failed on some of the clients") - g.log.info("IO is successful on all mounts") - - # Get entries before accessing file - g.log.info("Getting entries_before_accessing file...") - entries_before_accessing = get_heal_info_summary( - self.mnode, self.volname) - self.assertNotEqual(entries_before_accessing, None, - 'Can`t get heal info summary') - g.log.info( - "Getting entries_before_accessing file finished successfully") - - # Get filename to access from active subvol without offline bricks - # Get last subvol - subvols = get_subvols(self.mnode, self.volname) - subvol_without_offline_brick = subvols['volume_subvols'][-1] - - # Get first brick server and brick path - # and get first file from filelist - subvol_mnode, mnode_brick = subvol_without_offline_brick[0].split(':') - ret, file_list, err = g.run(subvol_mnode, 'ls %s' % mnode_brick) - file_to_edit = file_list.splitlines()[0] - - # Access and modify the file - g.log.info("Start modifying IO on all mounts...") - self.all_mounts_procs = [] - for mount_obj in self.mounts: - g.log.info("Modifying IO on %s:%s", mount_obj.client_system, - mount_obj.mountpoint) - - cmd = ("cd %s/ ; " - "dd if=/dev/zero of=%s bs=1G count=1" - % (mount_obj.mountpoint, file_to_edit)) - proc = g.run_async(mount_obj.client_system, cmd, - user=mount_obj.user) - self.all_mounts_procs.append(proc) - g.log.info("IO on %s:%s is modified successfully" - % (mount_obj.client_system, - mount_obj.mountpoint)) - self.io_validation_complete = False - - # Get entries while accessing file - g.log.info("Getting entries while accessing file...") - entries_while_accessing = get_heal_info_summary( - self.mnode, self.volname) - self.assertNotEqual(entries_before_accessing, None, - 'Can`t get heal info summary') - g.log.info("Getting entries while accessing file " - "finished successfully") - - # Compare dicts before accessing and while accessing - g.log.info('Comparing entries before modifying and while modifying...') - ret = cmp(entries_before_accessing, entries_while_accessing) - self.assertEqual(ret, 0, 'Entries before modifying and while modifying' - 'are not equal') - g.log.info('Comparison entries before modifying and while modifying' - 'finished successfully.') - - # Validate IO - g.log.info("Wait for IO to complete and validate IO ...") - ret = validate_io_procs(self.all_mounts_procs, self.mounts) - self.assertTrue(ret, "IO failed on some of the clients") - self.io_validation_complete = True - g.log.info("IO is successful on all mounts") diff --git 
a/tests/functional/afr/test_self_heal_daemon_process.py b/tests/functional/afr/test_self_heal_daemon_process.py deleted file mode 100644 index f3c416687..000000000 --- a/tests/functional/afr/test_self_heal_daemon_process.py +++ /dev/null @@ -1,663 +0,0 @@ -# Copyright (C) 2016-2017 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -""" Description: - Test Cases in this module tests the self heal daemon process. -""" - -from glusto.core import Glusto as g -from glustolibs.gluster.exceptions import ExecutionError -from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on -from glustolibs.gluster.volume_libs import ( - expand_volume, shrink_volume, log_volume_info_and_status, - wait_for_volume_process_to_be_online) -from glustolibs.gluster.rebalance_ops import (rebalance_start, - wait_for_rebalance_to_complete, - rebalance_status) -from glustolibs.gluster.brick_libs import (get_all_bricks, - bring_bricks_offline, - bring_bricks_online, - are_bricks_online, - select_bricks_to_bring_offline) -from glustolibs.gluster.brick_ops import replace_brick -from glustolibs.gluster.heal_libs import (get_self_heal_daemon_pid, - do_bricks_exist_in_shd_volfile, - is_shd_daemonized, - are_all_self_heal_daemons_are_online) -from glustolibs.gluster.volume_ops import (volume_stop, volume_start) -from glustolibs.gluster.gluster_init import restart_glusterd - - -@runs_on([['replicated', 'distributed-replicated', 'dispersed', - 'distributed-dispersed'], ['glusterfs', 'nfs', 'cifs']]) -class SelfHealDaemonProcessTests(GlusterBaseClass): - """ - SelfHealDaemonProcessTests contains tests which verifies the - self-heal daemon process of the nodes - """ - @classmethod - def setUpClass(cls): - """ - setup volume, mount volume and initialize necessary variables - which is used in tests - """ - - # calling GlusterBaseClass setUpClass - GlusterBaseClass.setUpClass.im_func(cls) - - # Setup Volume and Mount Volume - g.log.info("Starting to Setup Volume and Mount Volume") - ret = cls.setup_volume_and_mount_volume(mounts=cls.mounts) - if not ret: - raise ExecutionError("Failed to Setup_Volume and Mount_Volume") - g.log.info("Successful in Setup Volume and Mount Volume") - - # Verfiy glustershd process releases its parent process - ret = is_shd_daemonized(cls.servers) - if not ret: - raise ExecutionError("Self Heal Daemon process was still" - " holding parent process.") - g.log.info("Self Heal Daemon processes are online") - - cls.GLUSTERSHD = "/var/lib/glusterd/glustershd/glustershd-server.vol" - - def setUp(self): - """ - setUp method for every test - """ - - # calling GlusterBaseClass setUp - GlusterBaseClass.setUp.im_func(self) - - def tearDown(self): - """ - tearDown for every test - """ - - # Calling GlusterBaseClass tearDown - GlusterBaseClass.tearDown.im_func(self) - - @classmethod - def tearDownClass(cls): - """ - Clean up the volume and umount 
volume from client - """ - - # stopping the volume - g.log.info("Starting to Unmount Volume and Cleanup Volume") - ret = cls.unmount_volume_and_cleanup_volume(mounts=cls.mounts) - if not ret: - raise ExecutionError("Failed to Unmount Volume and Cleanup Volume") - g.log.info("Successful in Unmount Volume and Cleanup Volume") - - # calling GlusterBaseClass tearDownClass - GlusterBaseClass.tearDownClass.im_func(cls) - - def test_glustershd_with_add_remove_brick(self): - """ - Test script to verify glustershd process with adding and - removing bricks - - * check glustershd process - only 1 glustershd process should - be running - * bricks must be present in glustershd-server.vol file for - the replicated involved volumes - * Add bricks - * check glustershd process - only 1 glustershd process should - be running and its should be different from previous one - * bricks which are added must present in glustershd-server.vol file - * remove bricks - * check glustershd process - only 1 glustershd process should - be running and its different from previous one - * bricks which are removed should not present - in glustershd-server.vol file - - """ - - nodes = self.volume['servers'] - bricks_list = [] - glustershd_pids = {} - - # check the self-heal daemon process - g.log.info("Starting to get self-heal daemon process on " - "nodes %s" % nodes) - ret, pids = get_self_heal_daemon_pid(nodes) - self.assertTrue(ret, ("Either No self heal daemon process found or " - "more than One self heal daemon process " - "found : %s" % pids)) - g.log.info("Successful in getting Single self heal daemon process" - " on all nodes %s", nodes) - glustershd_pids = pids - - # get the bricks for the volume - g.log.info("Fetching bricks for the volume : %s" % self.volname) - bricks_list = get_all_bricks(self.mnode, self.volname) - g.log.info("Brick List : %s" % bricks_list) - - # validate the bricks present in volume info with - # glustershd server volume file - g.log.info("Starting parsing file %s on " - "node %s" % (self.GLUSTERSHD, self.mnode)) - ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname, - bricks_list) - self.assertTrue(ret, ("Brick List from volume info is different " - "from glustershd server volume file. 
" - "Please check log file for details")) - g.log.info("Successfully parsed %s file" % self.GLUSTERSHD) - - # expanding volume - g.log.info("Start adding bricks to volume %s" % self.volname) - ret = expand_volume(self.mnode, self.volname, self.servers, - self.all_servers_info) - self.assertTrue(ret, ("Failed to add bricks to " - "volume %s " % self.volname)) - g.log.info("Add brick successfull") - - # Log Volume Info and Status after expanding the volume - g.log.info("Logging volume info and Status after expanding volume") - ret = log_volume_info_and_status(self.mnode, self.volname) - self.assertTrue(ret, ("Logging volume info and status failed " - "on volume %s", self.volname)) - g.log.info("Successful in logging volume info and status " - "of volume %s", self.volname) - - # Verify volume's all process are online for 60 sec - g.log.info("Verifying volume's all process are online") - ret = wait_for_volume_process_to_be_online(self.mnode, self.volname, - 60) - self.assertTrue(ret, ("Volume %s : All process are not " - "online", self.volname)) - g.log.info("Successfully Verified volume %s processes are online", - self.volname) - - # Start Rebalance - g.log.info("Starting Rebalance on the volume") - ret, out, err = rebalance_start(self.mnode, self.volname) - self.assertEqual(ret, 0, ("Failed to start rebalance on " - "the volume %s with error %s" % - (self.volname, err))) - g.log.info("Successfully started rebalance on the " - "volume %s", self.volname) - - # Log Rebalance status - g.log.info("Log Rebalance status") - _, _, _ = rebalance_status(self.mnode, self.volname) - - # Wait for rebalance to complete - g.log.info("Waiting for rebalance to complete") - ret = wait_for_rebalance_to_complete(self.mnode, self.volname) - self.assertTrue(ret, ("Rebalance is not yet complete " - "on the volume %s", self.volname)) - g.log.info("Rebalance is successfully complete on " - "the volume %s", self.volname) - - # Check Rebalance status after rebalance is complete - g.log.info("Checking Rebalance status") - ret, _, _ = rebalance_status(self.mnode, self.volname) - self.assertEqual(ret, 0, ("Failed to get rebalance status for " - "the volume %s", self.volname)) - g.log.info("Successfully got rebalance status of the " - "volume %s", self.volname) - - # Check the self-heal daemon process after adding bricks - g.log.info("Starting to get self-heal daemon process on " - "nodes %s" % nodes) - glustershd_pids_after_expanding = {} - ret, pids = get_self_heal_daemon_pid(nodes) - self.assertTrue(ret, ("Either No self heal daemon process found or " - "more than One self heal daemon process found")) - g.log.info("Successfull in getting self-heal daemon process " - "on nodes %s" % nodes) - - glustershd_pids_after_expanding = pids - g.log.info("Self Heal Daemon Process ID's afetr expanding " - "volume: %s" % glustershd_pids_after_expanding) - - self.assertNotEqual(glustershd_pids, - glustershd_pids_after_expanding, - "Self Daemon process is same before and" - " after adding bricks") - g.log.info("Self Heal Daemon Process is different before and " - "after adding bricks") - - # get the bricks for the volume after expanding - bricks_list_after_expanding = get_all_bricks(self.mnode, self.volname) - g.log.info("Brick List after expanding " - "volume: %s" % bricks_list_after_expanding) - - # validate the bricks present in volume info - # with glustershd server volume file after adding bricks - g.log.info("Starting parsing file %s" % self.GLUSTERSHD) - ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname, - 
bricks_list_after_expanding) - - self.assertTrue(ret, ("Brick List from volume info is different " - "from glustershd server volume file after " - "expanding bricks. Please check log file " - "for details")) - g.log.info("Successfully parsed %s file" % self.GLUSTERSHD) - - # shrink the volume - g.log.info("Starting volume shrink") - ret = shrink_volume(self.mnode, self.volname) - self.assertTrue(ret, ("Failed to shrink the volume on " - "volume %s", self.volname)) - g.log.info("Shrinking volume is successful on " - "volume %s", self.volname) - - # Log Volume Info and Status after shrinking the volume - g.log.info("Logging volume info and Status after shrinking volume") - ret = log_volume_info_and_status(self.mnode, self.volname) - self.assertTrue(ret, ("Logging volume info and status failed on " - "volume %s", self.volname)) - g.log.info("Successful in logging volume info and status " - "of volume %s", self.volname) - - # get the bricks after shrinking the volume - bricks_list_after_shrinking = get_all_bricks(self.mnode, self.volname) - g.log.info("Brick List after shrinking " - "volume: %s" % bricks_list_after_shrinking) - - self.assertEqual(len(bricks_list_after_shrinking), len(bricks_list), - "Brick Count is mismatched after " - "shrinking the volume %s" % self.volname) - g.log.info("Brick Count matched before before expanding " - "and after shrinking volume") - - # Verfiy glustershd process releases its parent process - ret = is_shd_daemonized(nodes) - self.assertTrue(ret, ("Either No self heal daemon process found or " - "more than One self heal daemon process found")) - - # check the self-heal daemon process after removing bricks - g.log.info("Starting to get self-heal daemon process " - "on nodes %s" % nodes) - glustershd_pids_after_shrinking = {} - ret, pids = get_self_heal_daemon_pid(nodes) - glustershd_pids_after_shrinking = pids - self.assertNotEqual(glustershd_pids_after_expanding, - glustershd_pids_after_shrinking, - "Self Heal Daemon process is same " - "after adding bricks and shrinking volume") - g.log.info("Self Heal Daemon Process is different after adding bricks " - "and shrinking volume") - - # validate bricks present in volume info - # with glustershd server volume file after removing bricks - g.log.info("Starting parsing file %s" % self.GLUSTERSHD) - ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname, - bricks_list_after_shrinking) - self.assertTrue(ret, ("Brick List from volume info is different " - "from glustershd server volume file after " - "removing bricks. 
Please check log file " - "for details")) - g.log.info("Successfully parsed %s file" % self.GLUSTERSHD) - - def test_glustershd_with_restarting_glusterd(self): - """ - Test Script to verify the self heal daemon process with restarting - glusterd and rebooting the server - - * stop all volumes - * restart glusterd - should not run self heal daemon process - * start replicated involved volumes - * single self heal daemon process running - * restart glusterd - * self heal daemon pid will change - * bring down brick and restart glusterd - * self heal daemon pid will change and its different from previous - * brought up the brick - - """ - - nodes = self.volume['servers'] - - # stop the volume - g.log.info("Stopping the volume %s" % self.volname) - ret = volume_stop(self.mnode, self.volname) - self.assertTrue(ret, ("Failed to stop volume %s" % self.volname)) - g.log.info("Successfully stopped volume %s" % self.volname) - - # check the self heal daemon process after stopping the volume - g.log.info("Verifying the self heal daemon process for " - "volume %s" % self.volname) - ret = are_all_self_heal_daemons_are_online(self.mnode, self.volname) - self.assertFalse(ret, ("Self Heal Daemon process is still running " - "even after stopping volume %s" % self.volname)) - g.log.info("Self Heal Daemon is not running after stopping " - "volume %s" % self.volname) - - # restart glusterd service on all the servers - g.log.info("Restarting glusterd on all servers %s", nodes) - ret = restart_glusterd(nodes) - self.assertTrue(ret, ("Failed to restart glusterd on all nodes %s", - nodes)) - g.log.info("Successfully restarted glusterd on all nodes %s", - nodes) - - # check the self heal daemon process after restarting glusterd process - g.log.info("Starting to get self-heal daemon process on" - " nodes %s" % nodes) - ret = are_all_self_heal_daemons_are_online(self.mnode, self.volname) - self.assertFalse(ret, ("Self Heal Daemon process is running after " - "glusterd restart with volume %s in " - "stop state" % self.volname)) - g.log.info("Self Heal Daemon is not running after stopping " - "volume and restarting glusterd %s" % self.volname) - - # start the volume - g.log.info("Starting the volume %s" % self.volname) - ret = volume_start(self.mnode, self.volname) - self.assertTrue(ret, ("Failed to start volume %s" % self.volname)) - g.log.info("Volume %s started successfully" % self.volname) - - # Verfiy glustershd process releases its parent process - g.log.info("Checking whether glustershd process is daemonized or not") - ret = is_shd_daemonized(nodes) - self.assertTrue(ret, ("Either No self heal daemon process found or " - "more than One self heal daemon process found")) - g.log.info("Single self heal daemon process on all nodes %s" % nodes) - - # get the self heal daemon pids after starting volume - g.log.info("Starting to get self-heal daemon process " - "on nodes %s" % nodes) - ret, pids = get_self_heal_daemon_pid(nodes) - self.assertTrue(ret, ("Either No self heal daemon process found or " - "more than One self heal daemon process found")) - g.log.info("Succesfull in getting self heal daemon pids") - glustershd_pids = pids - - # get the bricks for the volume - g.log.info("Fetching bricks for the volume : %s" % self.volname) - bricks_list = get_all_bricks(self.mnode, self.volname) - g.log.info("Brick List : %s" % bricks_list) - - # validate the bricks present in volume info - # with glustershd server volume file - g.log.info("Starting parsing file %s on " - "node %s" % (self.GLUSTERSHD, self.mnode)) - ret = 
do_bricks_exist_in_shd_volfile(self.mnode, self.volname, - bricks_list) - self.assertTrue(ret, ("Brick List from volume info is different from " - "glustershd server volume file. " - "Please check log file for details.")) - g.log.info("Successfully parsed %s file" % self.GLUSTERSHD) - - # restart glusterd service on all the servers - g.log.info("Restarting glusterd on all servers %s", nodes) - ret = restart_glusterd(nodes) - self.assertTrue(ret, ("Failed to restart glusterd on all nodes %s", - nodes)) - g.log.info("Successfully restarted glusterd on all nodes %s", - nodes) - - # Verify volume's all process are online for 60 sec - g.log.info("Verifying volume's all process are online") - ret = wait_for_volume_process_to_be_online(self.mnode, self.volname, - 60) - self.assertTrue(ret, ("Volume %s : All process are not " - "online", self.volname)) - g.log.info("Successfully Verified volume %s processes are online", - self.volname) - - # Verfiy glustershd process releases its parent process - ret = is_shd_daemonized(nodes) - self.assertTrue(ret, ("Either No self heal daemon process found or " - "more than One self heal daemon process found")) - - # check the self heal daemon process after starting volume and - # restarting glusterd process - g.log.info("Starting to get self-heal daemon process " - "on nodes %s" % nodes) - ret, pids = get_self_heal_daemon_pid(nodes) - self.assertTrue(ret, ("Either No self heal daemon process found or " - "more than One self heal daemon process found")) - glustershd_pids_after_glusterd_restart = pids - - self.assertNotEqual(glustershd_pids, - glustershd_pids_after_glusterd_restart, - ("Self Heal Daemon pids are same after " - "restarting glusterd process")) - g.log.info("Self Heal Daemon process are different before and " - "after restarting glusterd process") - - # select bricks to bring offline - bricks_to_bring_offline_dict = (select_bricks_to_bring_offline( - self.mnode, self.volname)) - bricks_to_bring_offline = filter(None, ( - bricks_to_bring_offline_dict['hot_tier_bricks'] + - bricks_to_bring_offline_dict['cold_tier_bricks'] + - bricks_to_bring_offline_dict['volume_bricks'])) - - # bring bricks offline - g.log.info("Going to bring down the brick process " - "for %s" % bricks_to_bring_offline) - ret = bring_bricks_offline(self.volname, bricks_to_bring_offline) - self.assertTrue(ret, ("Failed to bring down the bricks. 
Please " - "check the log file for more details.")) - g.log.info("Brought down the brick process " - "for %s succesfully" % bricks_to_bring_offline) - - # restart glusterd after brought down the brick - g.log.info("Restart glusterd on all servers %s", nodes) - ret = restart_glusterd(nodes) - self.assertTrue(ret, ("Failed to restart glusterd on all nodes %s", - nodes)) - g.log.info("Successfully restarted glusterd on all nodes %s", - nodes) - - # Verify volume's all process are online for 60 sec - g.log.info("Verifying volume's all process are online") - ret = wait_for_volume_process_to_be_online(self.mnode, self.volname, - 60) - self.assertTrue(ret, ("Volume %s : All process are not " - "online", self.volname)) - g.log.info("Successfully Verified volume %s processes are online", - self.volname) - - # Verfiy glustershd process releases its parent process - ret = is_shd_daemonized(nodes) - self.assertTrue(ret, ("Either No self heal daemon process found or " - "more than One self heal daemon process found")) - - # check the self heal daemon process after killing brick and - # restarting glusterd process - g.log.info("Starting to get self-heal daemon process " - "on nodes %s" % nodes) - ret, pids = get_self_heal_daemon_pid(nodes) - self.assertTrue(ret, ("Either No self heal daemon process found or " - "more than One self heal daemon process found")) - glustershd_pids_after_killing_brick = pids - - self.assertNotEqual(glustershd_pids_after_glusterd_restart, - glustershd_pids_after_killing_brick, - ("Self Heal Daemon process are same from before " - "killing the brick,restarting glusterd process")) - g.log.info("Self Heal Daemon process are different after killing the " - "brick, restarting the glusterd process") - - # brought the brick online - g.log.info("bringing up the bricks : %s online" % - bricks_to_bring_offline) - ret = bring_bricks_online(self.mnode, self.volname, - bricks_to_bring_offline) - self.assertTrue(ret, ("Failed to brought the bricks online")) - g.log.info("Successfully brought the bricks online") - - # check all bricks are online - g.log.info("Verifying all bricka are online or not.....") - ret = are_bricks_online(self.mnode, self.volname, - bricks_to_bring_offline) - self.assertTrue(ret, ("Not all bricks are online")) - g.log.info("All bricks are online.") - - -@runs_on([['replicated', 'distributed-replicated'], - ['glusterfs', 'nfs', 'cifs']]) -class ImpactOfReplaceBrickForGlustershdTests(GlusterBaseClass): - """ - ClientSideQuorumTests contains tests which verifies the - client side quorum Test Cases - """ - - @classmethod - def setUpClass(cls): - # Calling GlusterBaseClass setUpClass - GlusterBaseClass.setUpClass.im_func(cls) - - # Override Volumes - if cls.volume_type == "distributed-replicated": - # Define distributed-replicated volume - cls.volume['voltype'] = { - 'type': 'distributed-replicated', - 'dist_count': 2, - 'replica_count': 3, - 'arbiter_count': 1, - 'transport': 'tcp'} - - cls.GLUSTERSHD = "/var/lib/glusterd/glustershd/glustershd-server.vol" - - def setUp(self): - """ - setUp method for every test - """ - - # calling GlusterBaseClass setUp - GlusterBaseClass.setUp.im_func(self) - - self.all_mounts_procs = [] - self.io_validation_complete = False - - # Setup Volume and Mount Volume - g.log.info("Starting to Setup Volume %s" % self.volname) - ret = self.setup_volume_and_mount_volume(self.mounts, - volume_create_force=False) - if not ret: - raise ExecutionError("Failed to Setup_Volume and Mount_Volume") - g.log.info("Successful in Setup Volume and Mount 
Volume") - - def tearDown(self): - """ - If test method failed before validating IO, tearDown waits for the - IO's to complete and checks for the IO exit status - - Cleanup and umount volume - """ - # Cleanup and umount volume - g.log.info("Starting to Unmount Volume and Cleanup Volume") - ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts) - if not ret: - raise ExecutionError("Failed to umount the vol & cleanup Volume") - g.log.info("Successful in umounting the volume and Cleanup") - - # Calling GlusterBaseClass teardown - GlusterBaseClass.tearDown.im_func(self) - - def test_impact_of_replace_brick_for_glustershd(self): - nodes = self.volume['servers'] - - # check the self-heal daemon process - g.log.info("Starting to get self-heal daemon process on " - "nodes %s" % nodes) - ret, pids = get_self_heal_daemon_pid(nodes) - self.assertTrue(ret, ("Either No self heal daemon process found or " - "more than One self heal daemon process " - "found : %s" % pids)) - g.log.info("Successful in getting Single self heal daemon process" - " on all nodes %s", nodes) - glustershd_pids = pids - - # get the bricks for the volume - g.log.info("Fetching bricks for the volume : %s" % self.volname) - bricks_list = get_all_bricks(self.mnode, self.volname) - g.log.info("Brick List : %s" % bricks_list) - - # validate the bricks present in volume info with - # glustershd server volume file - g.log.info("Starting parsing file %s on " - "node %s" % (self.GLUSTERSHD, self.mnode)) - ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname, - bricks_list) - self.assertTrue(ret, ("Brick List from volume info is different " - "from glustershd server volume file. " - "Please check log file for details")) - g.log.info("Successfully parsed %s file" % self.GLUSTERSHD) - - # replace brick - brick_to_replace = bricks_list[-1] - new_brick = brick_to_replace + 'new' - g.log.info("Replacing the brick %s for the volume : %s" - % (brick_to_replace, self.volname)) - ret, out, err = replace_brick(self.mnode, self.volname, - brick_to_replace, new_brick) - self.assertFalse(ret, err) - g.log.info('Replaced brick %s to %s successfully' - % (brick_to_replace, new_brick)) - - # check bricks - bricks_list = get_all_bricks(self.mnode, self.volname) - self.assertEqual(bricks_list[-1], new_brick, 'Replaced brick and ' - 'new brick are not equal') - - # Verify volume's all process are online for 60 sec - g.log.info("Verifying volume's all process are online") - ret = wait_for_volume_process_to_be_online(self.mnode, self.volname, - timeout=60) - self.assertTrue(ret, ("Volume %s : All process are not " - "online", self.volname)) - g.log.info("Successfully Verified volume %s processes are online", - self.volname) - - # Verify glustershd process releases its parent process - ret = is_shd_daemonized(nodes) - self.assertTrue(ret, ("Either No self heal daemon process found or " - "more than One self heal daemon process found")) - - # check the self-heal daemon process - g.log.info("Starting to get self-heal daemon process on " - "nodes %s" % nodes) - ret, pids = get_self_heal_daemon_pid(nodes) - self.assertTrue(ret, ("Either No self heal daemon process found or " - "more than One self heal daemon process " - "found : %s" % pids)) - g.log.info("Successful in getting Single self heal daemon process" - " on all nodes %s", nodes) - glustershd_pids_after_replacement = pids - - # Compare pids before and after replacing - self.assertNotEqual(glustershd_pids, - glustershd_pids_after_replacement, - "Self Daemon process is same before and" - " 
after replacing bricks") - g.log.info("Self Heal Daemon Process is different before and " - "after replacing bricks") - - # get the bricks for the volume after replacing - bricks_list_after_replacing = get_all_bricks(self.mnode, self.volname) - g.log.info("Brick List after expanding " - "volume: %s" % bricks_list_after_replacing) - - # validate the bricks present in volume info - # with glustershd server volume file after replacing bricks - g.log.info("Starting parsing file %s" % self.GLUSTERSHD) - ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname, - bricks_list_after_replacing) - - self.assertTrue(ret, ("Brick List from volume info is different " - "from glustershd server volume file after " - "replacing bricks. Please check log file " - "for details")) - g.log.info("Successfully parsed %s file" % self.GLUSTERSHD) diff --git a/tests/functional/arbiter/brick_cases/test_replica3_to_arbiter.py b/tests/functional/arbiter/brick_cases/test_replica3_to_arbiter.py index 2bfd7c8d0..4fe2a8ba8 100644 --- a/tests/functional/arbiter/brick_cases/test_replica3_to_arbiter.py +++ b/tests/functional/arbiter/brick_cases/test_replica3_to_arbiter.py @@ -42,11 +42,10 @@ class GlusterArbiterVolumeTypeClass(GlusterBaseClass): if cls.volume_type == "replicated": cls.volume['voltype'] = { - 'type': 'replicated', - 'replica_count': 3, - 'dist_count': 3, - 'transport': 'tcp' - } + 'type': 'replicated', + 'replica_count': 3, + 'dist_count': 3, + 'transport': 'tcp'} def setUp(self): """ @@ -79,6 +78,7 @@ class GlusterArbiterVolumeTypeClass(GlusterBaseClass): Description:- Reduce the replica count from replica 3 to arbiter """ + # pylint: disable=too-many-statements # Log Volume Info and Status g.log.info("Logging volume info and Status") ret = log_volume_info_and_status(self.mnode, self.volname) diff --git a/tests/functional/bvt/__init__.py b/tests/functional/bvt/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/functional/bvt/test_basic.py b/tests/functional/bvt/test_basic.py index 54078ef44..4dbb23cb5 100644 --- a/tests/functional/bvt/test_basic.py +++ b/tests/functional/bvt/test_basic.py @@ -16,8 +16,8 @@ """ Description: BVT-Basic Tests """ -import pytest import time +import pytest from glusto.core import Glusto as g from glustolibs.gluster.gluster_base_class import GlusterBaseClass from glustolibs.gluster.exceptions import ExecutionError @@ -112,7 +112,7 @@ class TestGlusterdSanity(GlusterBaseClass): ret = restart_glusterd(self.servers) if not ret: raise ExecutionError("Failed to restart glusterd on all " - "servers %s", self.servers) + "servers %s" % self.servers) g.log.info("Successfully restarted glusterd on all servers %s", self.servers) diff --git a/tests/functional/bvt/test_cvt.py b/tests/functional/bvt/test_cvt.py index 81f692c9b..dac0fe88d 100644 --- a/tests/functional/bvt/test_cvt.py +++ b/tests/functional/bvt/test_cvt.py @@ -82,25 +82,24 @@ class GlusterBasicFeaturesSanityBaseClass(GlusterBaseClass): "file_dir_ops.py") ret = upload_scripts(cls.clients, script_local_path) if not ret: - raise ExecutionError("Failed to upload IO scripts to clients %s", + raise ExecutionError("Failed to upload IO scripts to clients %s" % cls.clients) g.log.info("Successfully uploaded IO scripts to clients %s", cls.clients) cls.counter = 1 - """int: Value of counter is used for dirname-start-num argument for - file_dir_ops.py create_deep_dirs_with_files. 
- - The --dir-length argument value for - file_dir_ops.py create_deep_dirs_with_files is set to 10 - (refer to the cmd in setUp method). This means every mount will create - 10 top level dirs. For every mountpoint/testcase to create new set of - dirs, we are incrementing the counter by --dir-length value i.e 10 - in this test suite. - - If we are changing the --dir-length to new value, ensure the counter - is also incremented by same value to create new set of files/dirs. - """ + # int: Value of counter is used for dirname-start-num argument for + # file_dir_ops.py create_deep_dirs_with_files. + + # The --dir-length argument value for file_dir_ops.py + # create_deep_dirs_with_files is set to 10 (refer to the cmd in setUp + # method). This means every mount will create + # 10 top level dirs. For every mountpoint/testcase to create new set of + # dirs, we are incrementing the counter by --dir-length value i.e 10 in + # this test suite. + + # If we are changing the --dir-length to new value, ensure the counter + # is also incremented by same value to create new set of files/dirs. def setUp(self): """ @@ -692,23 +691,23 @@ class TestGlusterHealSanity(GlusterBasicFeaturesSanityBaseClass): - wait for heal to complete - validate IO """ + # pylint: disable=too-many-statements # Check if volume type is dispersed. If the volume type is # dispersed, set the volume option 'disperse.optimistic-change-log' # to 'off' # Refer to: https://bugzilla.redhat.com/show_bug.cgi?id=1470938 + # pylint: disable=unsupported-membership-test if 'dispersed' in self.volume_type and 'nfs' in self.mount_type: g.log.info("Set volume option 'disperse.optimistic-change-log' " "to 'off' on a dispersed volume . " "Refer to bug: " "https://bugzilla.redhat.com/show_bug.cgi?id=1470938") ret = set_volume_options(self.mnode, self.volname, - {'disperse.optimistic-change-log': 'off'} - ) + {'disperse.optimistic-change-log': 'off'}) self.assertTrue(ret, ("Failed to set the volume option %s to " "off on volume %s", 'disperse.optimistic-change-log', - self.volname) - ) + self.volname)) g.log.info("Successfully set the volume option " "'disperse.optimistic-change-log' to 'off'") diff --git a/tests/functional/bvt/test_vvt.py b/tests/functional/bvt/test_vvt.py index 1cff6750b..8b3b69bf3 100644 --- a/tests/functional/bvt/test_vvt.py +++ b/tests/functional/bvt/test_vvt.py @@ -57,7 +57,7 @@ class VolumeAccessibilityTests(GlusterBaseClass): "file_dir_ops.py") ret = upload_scripts(cls.clients, script_local_path) if not ret: - raise ExecutionError("Failed to upload IO scripts to clients %s", + raise ExecutionError("Failed to upload IO scripts to clients %s" % cls.clients) g.log.info("Successfully uploaded IO scripts to clients %s", cls.clients) @@ -72,7 +72,7 @@ class VolumeAccessibilityTests(GlusterBaseClass): g.log.info("Starting to Setup Volume %s", self.volname) ret = self.setup_volume() if not ret: - raise ExecutionError("Failed to Setup Volume %s", self.volname) + raise ExecutionError("Failed to Setup Volume %s" % self.volname) g.log.info("Successful in Setup Volume %s", self.volname) def tearDown(self): @@ -82,7 +82,7 @@ class VolumeAccessibilityTests(GlusterBaseClass): g.log.info("Starting to Setup Volume %s", self.volname) ret = self.cleanup_volume() if not ret: - raise ExecutionError("Failed to Setup_Volume %s", self.volname) + raise ExecutionError("Failed to Setup_Volume %s" % self.volname) g.log.info("Successful in Setup Volume %s", self.volname) # Calling GlusterBaseClass tearDown diff --git a/tests/functional/dht/__init__.py 
b/tests/functional/dht/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/functional/dht/test_negative_exercise_add_brick_command.py b/tests/functional/dht/test_negative_exercise_add_brick_command.py index 69caf3d2e..0824b1f14 100644 --- a/tests/functional/dht/test_negative_exercise_add_brick_command.py +++ b/tests/functional/dht/test_negative_exercise_add_brick_command.py @@ -19,6 +19,7 @@ from glusto.core import Glusto as g from glustolibs.gluster.gluster_base_class import (GlusterBaseClass, runs_on) +# pylint: disable=no-name-in-module from glustolibs.gluster.volume_libs import (form_bricks_list_to_add_brick, get_subvols, setup_volume, cleanup_volume) @@ -61,7 +62,7 @@ class ExerciseAddbrickCommand(GlusterBaseClass): ret = cleanup_volume(self.mnode, volume) if not ret: raise ExecutionError("Unable to delete volume % s" % volume) - g.log.info("Volume deleted successfully : %s" % volume) + g.log.info("Volume deleted successfully : %s", volume) # Calling GlusterBaseClass tearDown GlusterBaseClass.tearDown.im_func(self) @@ -73,8 +74,7 @@ class ExerciseAddbrickCommand(GlusterBaseClass): self.servers, self.all_servers_info) cmd = ("gluster volume add-brick %s " % (' '.join(bricks_list))) - g.log.info("Adding bricks without specifying volume name", - self.volname) + g.log.info("Adding bricks without specifying volume name") _, _, err = g.run(self.mnode, cmd) self.assertIn("does not exist", err, "add-brick is successful") g.log.info("Volume add-brick failed with error %s ", err) @@ -127,7 +127,7 @@ class ExerciseAddbrickCommand(GlusterBaseClass): bricks_list = get_subvols(self.mnode, self.volname)['volume_subvols'][0] for (i, item) in enumerate(bricks_list): - server, bricks = item.split(":") + server, _ = item.split(":") item.replace(server, "abc.def.ghi.jkl") bricks_list[i] = item.replace(server, "abc.def.ghi.jkl") g.log.info("Adding bricks to the volume %s from the host which is not" @@ -155,8 +155,8 @@ class AddBrickAlreadyPartOfAnotherVolume(GlusterBaseClass): for volume in vol_list: ret = cleanup_volume(self.mnode, volume) if not ret: - raise ExecutionError("Unable to delete volume % s" % volume) - g.log.info("Volume deleted successfully : %s" % volume) + raise ExecutionError("Unable to delete volume %s" % volume) + g.log.info("Volume deleted successfully : %s", volume) GlusterBaseClass.tearDown.im_func(self) diff --git a/tests/functional/glusterd/test_add_brick.py b/tests/functional/glusterd/test_add_brick.py new file mode 100644 index 000000000..aa3b6aedf --- /dev/null +++ b/tests/functional/glusterd/test_add_brick.py @@ -0,0 +1,134 @@ +# Copyright (C) 2016-2017 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +import random +from glusto.core import Glusto as g +from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on +from glustolibs.gluster.exceptions import ExecutionError +from glustolibs.gluster.volume_libs import setup_volume, cleanup_volume +from glustolibs.gluster.volume_ops import (get_volume_list) +from glustolibs.gluster.brick_ops import add_brick +from glustolibs.gluster.lib_utils import form_bricks_list +from glustolibs.gluster.rebalance_ops import rebalance_start + + +@runs_on([['distributed-replicated'], ['glusterfs']]) +class TestVolumeCreate(GlusterBaseClass): + + @classmethod + def setUpClass(cls): + + # Calling GlusterBaseClass setUpClass + GlusterBaseClass.setUpClass.im_func(cls) + + # check whether peers are in connected state + ret = cls.validate_peers_are_connected() + if not ret: + raise ExecutionError("Peers are not in connected state") + + def tearDown(self): + + # clean up all volumes + vol_list = get_volume_list(self.mnode) + if vol_list is None: + raise ExecutionError("Failed to get the volume list") + + for volume in vol_list: + ret = cleanup_volume(self.mnode, volume) + if not ret: + raise ExecutionError("Unable to delete volume % s" % volume) + g.log.info("Volume deleted successfully : %s", volume) + + GlusterBaseClass.tearDown.im_func(self) + + def test_add_brick_functionality(self): + + ret = setup_volume(self.mnode, self.all_servers_info, self.volume) + self.assertTrue(ret, "Failed to create and start volume %s" + % self.volname) + g.log.info("Volume created and started successfully") + + # form bricks list to test add brick functionality + + replica_count_of_volume = self.volume['voltype']['replica_count'] + num_of_bricks = 4 * replica_count_of_volume + bricks_list = form_bricks_list(self.mnode, self.volname, + num_of_bricks, self.servers, + self.all_servers_info) + self.assertIsNotNone(bricks_list, "Bricks list is None") + + # Try to add a single brick to volume, which should fail as it is a + # replicated volume, we should pass multiple of replica count number + # of bricks + + bricks_list_to_add = [bricks_list[0]] + ret, _, _ = add_brick(self.mnode, self.volname, bricks_list_to_add) + self.assertNotEqual(ret, 0, "Expected: It should fail to add a single" + "brick to a replicated volume. Actual: " + "Successfully added single brick to volume") + g.log.info("failed to add a single brick to replicated volume") + + # add brick replica count number of bricks in which one is + # non existing brick + kwargs = {} + kwargs['replica_count'] = replica_count_of_volume + + bricks_list_to_add = bricks_list[1:replica_count_of_volume + 1] + + num_of_bricks = len(bricks_list_to_add) + index_of_non_existing_brick = random.randint(0, num_of_bricks - 1) + complete_brick = bricks_list_to_add[index_of_non_existing_brick] + non_existing_brick = complete_brick + "/non_existing_brick" + bricks_list_to_add[index_of_non_existing_brick] = non_existing_brick + + ret, _, _ = add_brick(self.mnode, self.volname, + bricks_list_to_add, False, **kwargs) + self.assertNotEqual(ret, 0, "Expected: It should fail to add non" + "existing brick to a volume. 
Actual: " + "Successfully added non existing brick to volume") + g.log.info("failed to add a non existing brick to volume") + + # adding brick from node which is not part of cluster + bricks_list_to_add = bricks_list[replica_count_of_volume + 1: + (2 * replica_count_of_volume) + 1] + + num_of_bricks = len(bricks_list_to_add) + index_of_node = random.randint(0, num_of_bricks - 1) + complete_brick = bricks_list_to_add[index_of_node].split(":") + complete_brick[0] = "abc.def.ghi.jkl" + bricks_list_to_add[index_of_node] = ":".join(complete_brick) + ret, _, _ = add_brick(self.mnode, self.volname, + bricks_list_to_add, False, **kwargs) + self.assertNotEqual(ret, 0, "Expected: It should fail to add brick " + "from a node which is not part of a cluster." + "Actual:Successfully added bricks from node which" + " is not a part of cluster to volume") + + g.log.info("Failed to add bricks form node which is not a part of " + "cluster to volume") + + # add correct number of valid bricks, it should succeed + + bricks_list_to_add = bricks_list[(2 * replica_count_of_volume) + 1: + (3 * replica_count_of_volume) + 1] + ret, _, _ = add_brick(self.mnode, self.volname, + bricks_list_to_add, False, **kwargs) + self.assertEqual(ret, 0, "Failed to add the bricks to the volume") + g.log.info("Successfully added bricks to volume") + + # Perform rebalance start operation + ret, _, _ = rebalance_start(self.mnode, self.volname) + self.assertEqual(ret, 0, "Rebalance start is success") diff --git a/tests/functional/glusterd/test_add_brick_functionality.py b/tests/functional/glusterd/test_add_brick_functionality.py deleted file mode 100644 index bd6ce3ea0..000000000 --- a/tests/functional/glusterd/test_add_brick_functionality.py +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright (C) 2016-2017 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
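Editor's note: the new test_add_brick.py above drives every scenario through the same add_brick call, passing `replica_count` via keyword arguments and treating a non-zero return code as the expected failure. A condensed sketch of that call pattern, assuming an existing distributed-replicated volume; the node, volume, and brick paths below are placeholders.

# Condensed sketch of the add-brick pattern exercised above.
from glustolibs.gluster.brick_ops import add_brick
from glustolibs.gluster.rebalance_ops import rebalance_start

mnode = "server0.example.com"             # hypothetical management node
volname = "testvol"                       # hypothetical replica-2 volume
bricks = ["server1.example.com:/bricks/brick1/b1",
          "server2.example.com:/bricks/brick1/b2"]

# Add one full replica set; force stays False, replica_count is a kwarg.
ret, _, err = add_brick(mnode, volname, bricks, False, replica_count=2)
assert ret == 0, "add-brick failed: %s" % err

# A follow-up rebalance spreads existing data onto the new bricks.
ret, _, _ = rebalance_start(mnode, volname)
assert ret == 0, "rebalance start failed"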
- -from glusto.core import Glusto as g -from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on -from glustolibs.gluster.exceptions import ExecutionError -from glustolibs.gluster.volume_libs import setup_volume, cleanup_volume -from glustolibs.gluster.volume_ops import (get_volume_list) -from glustolibs.gluster.brick_ops import add_brick -from glustolibs.gluster.lib_utils import form_bricks_list -from glustolibs.gluster.rebalance_ops import rebalance_start -import random - - -@runs_on([['distributed-replicated'], ['glusterfs']]) -class TestVolumeCreate(GlusterBaseClass): - - @classmethod - def setUpClass(cls): - - # Calling GlusterBaseClass setUpClass - GlusterBaseClass.setUpClass.im_func(cls) - - # check whether peers are in connected state - ret = cls.validate_peers_are_connected() - if not ret: - raise ExecutionError("Peers are not in connected state") - - def tearDown(self): - - # clean up all volumes - vol_list = get_volume_list(self.mnode) - if vol_list is None: - raise ExecutionError("Failed to get the volume list") - - for volume in vol_list: - ret = cleanup_volume(self.mnode, volume) - if not ret: - raise ExecutionError("Unable to delete volume % s" % volume) - g.log.info("Volume deleted successfully : %s" % volume) - - GlusterBaseClass.tearDown.im_func(self) - - def test_add_brick_functionality(self): - - ret = setup_volume(self.mnode, self.all_servers_info, self.volume) - self.assertTrue(ret, ("Failed to create and start volume %s" - % self.volname)) - g.log.info("Volume created and started succssfully") - - # form bricks list to test add brick functionality - - replica_count_of_volume = self.volume['voltype']['replica_count'] - num_of_bricks = 4 * replica_count_of_volume - bricks_list = form_bricks_list(self.mnode, self.volname, - num_of_bricks, self.servers, - self.all_servers_info) - self.assertIsNotNone(bricks_list, "Bricks list is None") - - # Try to add a single brick to volume, which should fail as it is a - # replicated volume, we should pass multiple of replica count number - # of bricks - - bricks_list_to_add = [bricks_list[0]] - ret, out, err = add_brick(self.mnode, self.volname, bricks_list_to_add) - self.assertNotEqual(ret, 0, "Expected: It should fail to add a single" - "brick to a replicated volume. Actual: " - "Successfully added single brick to volume") - g.log.info("failed to add a single brick to replicated volume") - - # add brick replica count number of bricks in which one is - # non existing brick - kwargs = {} - kwargs['replica_count'] = replica_count_of_volume - - bricks_list_to_add = bricks_list[1:replica_count_of_volume + 1] - - num_of_bricks = len(bricks_list_to_add) - index_of_non_existing_brick = random.randint(0, num_of_bricks - 1) - complete_brick = bricks_list_to_add[index_of_non_existing_brick] - non_existing_brick = complete_brick + "/non_existing_brick" - bricks_list_to_add[index_of_non_existing_brick] = non_existing_brick - - ret, out, err = add_brick(self.mnode, self.volname, - bricks_list_to_add, False, **kwargs) - self.assertNotEqual(ret, 0, "Expected: It should fail to add non" - "existing brick to a volume. 
Actual: " - "Successfully added non existing brick to volume") - g.log.info("failed to add a non existing brick to volume") - - # adding brick from node which is not part of cluster - bricks_list_to_add = bricks_list[replica_count_of_volume + 1: - (2 * replica_count_of_volume) + 1] - - num_of_bricks = len(bricks_list_to_add) - index_of_node = random.randint(0, num_of_bricks - 1) - complete_brick = bricks_list_to_add[index_of_node].split(":") - complete_brick[0] = "abc.def.ghi.jkl" - bricks_list_to_add[index_of_node] = ":".join(complete_brick) - ret, out, err = add_brick(self.mnode, self.volname, - bricks_list_to_add, False, **kwargs) - self.assertNotEqual(ret, 0, "Expected: It should fail to add brick " - "from a node which is not part of a cluster." - "Actual:Successfully added bricks from node which" - " is not a part of cluster to volume") - - g.log.info("Failed to add bricks form node which is not a part of " - "cluster to volume") - - # add correct number of valid bricks, it should succeed - - bricks_list_to_add = bricks_list[(2 * replica_count_of_volume) + 1: - (3 * replica_count_of_volume) + 1] - ret, out, err = add_brick(self.mnode, self.volname, - bricks_list_to_add, False, **kwargs) - self.assertEqual(ret, 0, "Failed to add the bricks to the volume") - g.log.info("Successfully added bricks to volume") - - # Perform rebalance start operation - ret, out, err = rebalance_start(self.mnode, self.volname) - self.assertEqual(ret, 0, "Rebalance start is success") diff --git a/tests/functional/glusterd/test_concurrent_set.py b/tests/functional/glusterd/test_concurrent_set.py index 91cfe659c..7c753ea78 100644 --- a/tests/functional/glusterd/test_concurrent_set.py +++ b/tests/functional/glusterd/test_concurrent_set.py @@ -29,17 +29,12 @@ class TestConcurrentSet(GlusterBaseClass): @classmethod def setUpClass(cls): GlusterBaseClass.setUpClass.im_func(cls) - g.log.info("Starting %s " % cls.__name__) - ''' - checking for peer status from every node, if peers are in not - connected state, performing peer probe. 
- ''' + g.log.info("Starting %s ", cls.__name__) ret = cls.validate_peers_are_connected() if not ret: raise ExecutionError("Nodes are not in peer probe state") def tearDown(self): - ''' clean up all volumes and detaches peers from cluster ''' @@ -47,7 +42,7 @@ class TestConcurrentSet(GlusterBaseClass): for volume in vol_list: ret = cleanup_volume(self.mnode, volume) self.assertTrue(ret, "Failed to Cleanup the Volume %s" % volume) - g.log.info("Volume deleted successfully : %s" % volume) + g.log.info("Volume deleted successfully : %s", volume) GlusterBaseClass.tearDown.im_func(self) @@ -64,8 +59,8 @@ class TestConcurrentSet(GlusterBaseClass): ret = volume_create(self.mnode, self.volname, self.brick_list, force=False) self.assertEqual(ret[0], 0, ("Unable" - "to create volume % s" % self.volname)) - g.log.info("Volume created successfuly % s" % self.volname) + "to create volume %s" % self.volname)) + g.log.info("Volume created successfuly %s", self.volname) # Create a volume self.volname = "second-vol" @@ -76,8 +71,8 @@ class TestConcurrentSet(GlusterBaseClass): ret = volume_create(self.mnode, self.volname, self.brick_list, force=False) self.assertEqual(ret[0], 0, ("Unable" - "to create volume % s" % self.volname)) - g.log.info("Volume created successfuly % s" % self.volname) + "to create volume %s" % self.volname)) + g.log.info("Volume created successfuly %s", self.volname) cmd1 = ("for i in `seq 1 100`; do gluster volume set first-vol " "read-ahead on; done") @@ -87,8 +82,8 @@ class TestConcurrentSet(GlusterBaseClass): proc1 = g.run_async(random.choice(self.servers), cmd1) proc2 = g.run_async(random.choice(self.servers), cmd2) - ret1, out1, err1 = proc1.async_communicate() - ret2, out2, err2 = proc2.async_communicate() + ret1, _, _ = proc1.async_communicate() + ret2, _, _ = proc2.async_communicate() self.assertEqual(ret1, 0, "Concurrent volume set on different volumes " "simultaneously failed") @@ -98,7 +93,7 @@ class TestConcurrentSet(GlusterBaseClass): g.log.info("Setting options on different volumes @ same time " "successfully completed") ret = is_core_file_created(self.servers, test_timestamp) - if (ret): + if ret: g.log.info("No core file found, glusterd service " "running successfully") else: diff --git a/tests/functional/glusterd/test_nfs_quorum.py b/tests/functional/glusterd/test_nfs_quorum.py new file mode 100644 index 000000000..ced5b719f --- /dev/null +++ b/tests/functional/glusterd/test_nfs_quorum.py @@ -0,0 +1,173 @@ +# Copyright (C) 2017-2018 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
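Editor's note: test_concurrent_set.py above fires two long-running `gluster volume set` loops with g.run_async and only inspects the return codes once both finish. A stripped-down sketch of that async pattern follows; the host names are placeholders, and the second command body is assumed since it is not shown in this hunk.

# Stripped-down sketch of the concurrent volume-set pattern used above.
from glusto.core import Glusto as g

cmd1 = ("for i in `seq 1 100`; do gluster volume set first-vol "
        "read-ahead on; done")
cmd2 = ("for i in `seq 1 100`; do gluster volume set second-vol "
        "write-behind on; done")          # assumed command body

proc1 = g.run_async("server1.example.com", cmd1)
proc2 = g.run_async("server2.example.com", cmd2)

# async_communicate() blocks until the remote command completes and
# returns (retcode, stdout, stderr); unused outputs become '_'.
ret1, _, _ = proc1.async_communicate()
ret2, _, _ = proc2.async_communicate()
assert ret1 == 0 and ret2 == 0, "concurrent volume set failed"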
+ +from glusto.core import Glusto as g +from glustolibs.gluster.exceptions import ExecutionError +from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on +from glustolibs.gluster.volume_ops import set_volume_options + + +@runs_on([['distributed', 'replicated', 'distributed-replicated', + 'dispersed', 'distributed-dispersed'], ['nfs']]) +class TestNfsMountAndServerQuorumSettings(GlusterBaseClass): + """ + Test Cases for performing NFS disable, enable and + performing NFS mount and unmoount on all volumes, + performing different types quorum settings + """ + + @classmethod + def setUpClass(cls): + GlusterBaseClass.setUpClass.im_func(cls) + g.log.info("Starting %s ", cls.__name__) + + # checking for peer status from every node + ret = cls.validate_peers_are_connected() + if not ret: + raise ExecutionError("Servers are not in peer probed state") + + def setUp(self): + """ + setUp method for every test + """ + # calling GlusterBaseClass setUp + GlusterBaseClass.setUp.im_func(self) + + # Creating Volume + g.log.info("Started creating volume") + ret = self.setup_volume() + if not ret: + raise ExecutionError("Volume creation failed: %s" % self.volname) + g.log.info("Volme created successfully : %s", self.volname) + + def tearDown(self): + """ + tearDown for every test + """ + # stopping the volume and Cleaning up the volume + ret = self.cleanup_volume() + if not ret: + raise ExecutionError("Failed Cleanup the Volume %s" % self.volname) + g.log.info("Volume deleted successfully : %s", self.volname) + + # Calling GlusterBaseClass tearDown + GlusterBaseClass.tearDown.im_func(self) + + def test_nfs_mount_quorum_settings(self): + """ + Set nfs.disable off + Mount it with nfs and unmount it + set nfs.disable enable + Mount it with nfs + Set nfs.disable disable + Enable server quorum + Set the quorum ratio to numbers and percentage, + negative- numbers should fail, negative percentage should fail, + fraction should fail, negative fraction should fail + """ + + # Mounting a NFS volume + ret = self.mount_volume(self.mounts) + self.assertTrue(ret, "NFS volume mount failed for %s" % self.volname) + g.log.info("Volume mounted sucessfully : %s", self.volname) + + # unmounting NFS Volume + ret = self.unmount_volume(self.mounts) + self.assertTrue(ret, "Volumes UnMount failed") + g.log.info("Volumes UnMounted successfully") + + # performing nfs.disable enable + self.nfs_options = {"nfs.disable": "enable"} + ret = set_volume_options(self.mnode, self.volname, self.nfs_options) + self.assertTrue(ret, "gluster volume set %s nfs.disable " + "enable failed" % self.volname) + g.log.info("gluster volume set %s nfs.disable " + "enabled successfully", self.volname) + + # Mounting a NFS volume + ret = self.mount_volume(self.mounts) + self.assertFalse(ret, "Volume mount should fail for %s, but volume " + "mounted successfully after nfs.disable on" + % self.volname) + g.log.info("Volume mount failed : %s", self.volname) + + # performing nfs.disable disable + self.nfs_options['nfs.disable'] = 'disable' + ret = set_volume_options(self.mnode, self.volname, self.nfs_options) + self.assertTrue(ret, "gluster volume set %s nfs.disable " + "disable failed" % self.volname) + g.log.info("gluster volume set %s nfs.disable " + "disabled successfully", self.volname) + + # Enabling server quorum + self.quorum_options = {'cluster.server-quorum-type': 'server'} + ret = set_volume_options(self.mnode, self.volname, self.quorum_options) + self.assertTrue(ret, "gluster volume set %s cluster.server-quorum-type" + " server 
Failed" % self.volname) + g.log.info("gluster volume set %s cluster.server-quorum-type server " + "enabled successfully", self.volname) + + # Setting Quorum ratio in percentage + self.quorum_perecent = {'cluster.server-quorum-ratio': '51%'} + ret = set_volume_options(self.mnode, 'all', self.quorum_perecent) + self.assertTrue(ret, "gluster volume set all cluster.server-quorum-rat" + "io percentage Failed :%s" % self.servers) + g.log.info("gluster volume set all cluster.server-quorum-ratio 51 " + "percentage enabled successfully on :%s", self.servers) + + # Setting quorum ration in numbers + self.quorum_perecent['cluster.server-quorum-ratio'] = "50" + ret = set_volume_options(self.mnode, 'all', self.quorum_perecent) + self.assertTrue(ret, "gluster volume set all cluster.server-quorum-rat" + "io 50 Failed on :%s" % self.servers) + g.log.info("gluster volume set all cluster.server-quorum-ratio 50 enab" + "led successfully 0n :%s", self.servers) + + # Setting quorum ration in negative numbers + self.quorum_perecent['cluster.server-quorum-ratio'] = "-50" + ret = set_volume_options(self.mnode, 'all', self.quorum_perecent) + self.assertFalse(ret, "gluster volume set all cluster.server-quorum-ra" + "tio should fail for negative numbers on :%s" % + self.servers) + g.log.info("gluster volume set all cluster.server-quorum-ratio Failed " + "for negative number on :%s", self.servers) + + # Setting quorum ration in negative percentage + self.quorum_perecent['cluster.server-quorum-ratio'] = "-51%" + ret = set_volume_options(self.mnode, 'all', self.quorum_perecent) + self.assertFalse(ret, "gluster volume set all cluster.server-quorum-" + "ratio should fail for negative percentage on" + ":%s" % self.servers) + g.log.info("gluster volume set all cluster.server-quorum-ratio Failed " + "for negtive percentage on :%s", self.servers) + + # Setting quorum ration in fraction numbers + self.quorum_perecent['cluster.server-quorum-ratio'] = "1/2" + ret = set_volume_options(self.mnode, 'all', self.quorum_perecent) + self.assertFalse(ret, "glustervolume set all cluster.server-quorum-" + "ratio should fail for fraction numbers :%s" + % self.servers) + g.log.info("gluster volume set all cluster.server-quorum-ratio " + "Failed for fraction number :%s", self.servers) + + # Setting quorum ration in negative fraction numbers + self.quorum_perecent['cluster.server-quorum-ratio'] = "-1/2" + ret = set_volume_options(self.mnode, 'all', self.quorum_perecent) + self.assertFalse(ret, "glustervolume set all cluster.server-quorum-" + "ratio should fail for negative fraction numbers" + " :%s" % self.servers) + g.log.info("gluster volume set all cluster.server-quorum-ratio Failed " + "for negative fraction number :%s", self.servers) diff --git a/tests/functional/glusterd/test_nfs_quorum_on_all_vol_types.py b/tests/functional/glusterd/test_nfs_quorum_on_all_vol_types.py deleted file mode 100644 index 64526b0ee..000000000 --- a/tests/functional/glusterd/test_nfs_quorum_on_all_vol_types.py +++ /dev/null @@ -1,172 +0,0 @@ -# Copyright (C) 2017-2018 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -""" Description: - Test Cases for performing NFS disable, enable and - performing NFS mount and unmoount on all volumes, - performing different types quorum settings -""" -from glusto.core import Glusto as g -from glustolibs.gluster.exceptions import ExecutionError -from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on -from glustolibs.gluster.volume_ops import set_volume_options - - -@runs_on([['distributed', 'replicated', 'distributed-replicated', - 'dispersed', 'distributed-dispersed'], ['nfs']]) -class TestNfsMountAndServerQuorumSettings(GlusterBaseClass): - @classmethod - def setUpClass(cls): - GlusterBaseClass.setUpClass.im_func(cls) - g.log.info("Starting %s " % cls.__name__) - - # checking for peer status from every node - ret = cls.validate_peers_are_connected() - if not ret: - raise ExecutionError("Servers are not in peer probed state") - - def setUp(self): - """ - setUp method for every test - """ - # calling GlusterBaseClass setUp - GlusterBaseClass.setUp.im_func(self) - - # Creating Volume - g.log.info("Started creating volume") - ret = self.setup_volume() - if not ret: - raise ExecutionError("Volume creation failed: %s" % self.volname) - g.log.info("Volme created successfully : %s" % self.volname) - - def tearDown(self): - """ - tearDown for every test - """ - # stopping the volume and Cleaning up the volume - ret = self.cleanup_volume() - if not ret: - raise ExecutionError("Failed Cleanup the Volume %s" % self.volname) - g.log.info("Volume deleted successfully : %s" % self.volname) - - # Calling GlusterBaseClass tearDown - GlusterBaseClass.tearDown.im_func(self) - - def test_nfs_mount_quorum_settings(self): - """ - Set nfs.disable off - Mount it with nfs and unmount it - set nfs.disable enable - Mount it with nfs - Set nfs.disable disable - Enable server quorum - Set the quorum ratio to numbers and percentage, - negative- numbers should fail, negative percentage should fail, - fraction should fail, negative fraction should fail - """ - - # Mounting a NFS volume - ret = self.mount_volume(self.mounts) - self.assertTrue(ret, "NFS volume mount failed for %s" % self.volname) - g.log.info("Volume mounted sucessfully : %s" % self.volname) - - # unmounting NFS Volume - ret = self.unmount_volume(self.mounts) - self.assertTrue(ret, "Volumes UnMount failed") - g.log.info("Volumes UnMounted successfully") - - # performing nfs.disable enable - self.nfs_options = {"nfs.disable": "enable"} - ret = set_volume_options(self.mnode, self.volname, self.nfs_options) - self.assertTrue(ret, "gluster volume set %s nfs.disable " - "enable failed" % self.volname) - g.log.info("gluster volume set %s nfs.disable " - "enabled successfully" % self.volname) - - # Mounting a NFS volume - ret = self.mount_volume(self.mounts) - self.assertFalse(ret, "Volume mount should fail for %s, but volume " - "mounted successfully after nfs.disable on" - % self.volname) - g.log.info("Volume mount failed : %s" % self.volname) - - # performing nfs.disable disable - self.nfs_options['nfs.disable'] = 'disable' - ret = set_volume_options(self.mnode, self.volname, self.nfs_options) - self.assertTrue(ret, "gluster volume set %s nfs.disable " - "disable failed" % self.volname) - g.log.info("gluster volume set %s nfs.disable " - "disabled successfully" % self.volname) - - # Enabling server 
quorum - self.quorum_options = {'cluster.server-quorum-type': 'server'} - ret = set_volume_options(self.mnode, self.volname, self.quorum_options) - self.assertTrue(ret, "gluster volume set %s cluster.server-quorum-type" - " server Failed" % self.volname) - g.log.info("gluster volume set %s cluster.server-quorum-type server " - "enabled successfully" % self.volname) - - # Setting Quorum ratio in percentage - self.quorum_perecent = {'cluster.server-quorum-ratio': '51%'} - ret = set_volume_options(self.mnode, 'all', self.quorum_perecent) - self.assertTrue(ret, "gluster volume set all cluster.server-quorum-rat" - "io percentage Failed :%s" % self.servers) - g.log.info("gluster volume set all cluster.server-quorum-ratio 51 " - "percentage enabled successfully on :%s" % self.servers) - - # Setting quorum ration in numbers - self.quorum_perecent['cluster.server-quorum-ratio'] = "50" - ret = set_volume_options(self.mnode, 'all', self.quorum_perecent) - self.assertTrue(ret, "gluster volume set all cluster.server-quorum-rat" - "io 50 Failed on :%s" % self.servers) - g.log.info("gluster volume set all cluster.server-quorum-ratio 50 enab" - "led successfully 0n :%s" % self.servers) - - # Setting quorum ration in negative numbers - self.quorum_perecent['cluster.server-quorum-ratio'] = "-50" - ret = set_volume_options(self.mnode, 'all', self.quorum_perecent) - self.assertFalse(ret, "gluster volume set all cluster.server-quorum-ra" - "tio should fail for negative numbers on :%s" % - self.servers) - g.log.info("gluster volume set all cluster.server-quorum-ratio Failed " - "for negative number on :%s" % self.servers) - - # Setting quorum ration in negative percentage - self.quorum_perecent['cluster.server-quorum-ratio'] = "-51%" - ret = set_volume_options(self.mnode, 'all', self.quorum_perecent) - self.assertFalse(ret, "gluster volume set all cluster.server-quorum-" - "ratio should fail for negative percentage on" - ":%s" % self.servers) - g.log.info("gluster volume set all cluster.server-quorum-ratio Failed " - "for negtive percentage on :%s" % self.servers) - - # Setting quorum ration in fraction numbers - self.quorum_perecent['cluster.server-quorum-ratio'] = "1/2" - ret = set_volume_options(self.mnode, 'all', self.quorum_perecent) - self.assertFalse(ret, "glustervolume set all cluster.server-quorum-" - "ratio should fail for fraction numbers :%s" - % self.servers) - g.log.info("gluster volume set all cluster.server-quorum-ratio " - "Failed for fraction number :%s" % self.servers) - - # Setting quorum ration in negative fraction numbers - self.quorum_perecent['cluster.server-quorum-ratio'] = "-1/2" - ret = set_volume_options(self.mnode, 'all', self.quorum_perecent) - self.assertFalse(ret, "glustervolume set all cluster.server-quorum-" - "ratio should fail for negative fraction numbers" - " :%s" % self.servers) - g.log.info("gluster volume set all cluster.server-quorum-ratio Failed " - "for negative fraction number :%s" % self.servers) diff --git a/tests/functional/glusterd/test_peer_detach.py b/tests/functional/glusterd/test_peer_detach.py index 0ed0e678c..2bae76d2a 100644 --- a/tests/functional/glusterd/test_peer_detach.py +++ b/tests/functional/glusterd/test_peer_detach.py @@ -14,8 +14,8 @@ # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -""" Description: - Test Cases in this module related to Glusterd peer detach. +""" +Test Cases in this module related to Glusterd peer detach. 
""" from glusto.core import Glusto as g from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on @@ -28,6 +28,9 @@ from glustolibs.gluster.lib_utils import is_core_file_created @runs_on([['distributed', 'replicated', 'distributed-replicated', 'dispersed', 'distributed-dispersed'], ['glusterfs']]) class PeerDetachVerification(GlusterBaseClass): + """ + Test that peer detach works as expected + """ @classmethod def setUpClass(cls): GlusterBaseClass.setUpClass.im_func(cls) @@ -38,14 +41,14 @@ class PeerDetachVerification(GlusterBaseClass): raise ExecutionError("Peer probe failed ") else: g.log.info("All server peers are already in connected state " - "%s:" % cls.servers) + "%s:", cls.servers) @classmethod def tearDownClass(cls): # stopping the volume and Cleaning up the volume ret = cls.cleanup_volume() if ret: - g.log.info("Volume deleted successfully : %s" % cls.volname) + g.log.info("Volume deleted successfully : %s", cls.volname) else: raise ExecutionError("Failed Cleanup the Volume %s" % cls.volname) @@ -69,33 +72,33 @@ class PeerDetachVerification(GlusterBaseClass): self.invalid_ip = '10.11.a' # Peer detach to specified server - g.log.info("Start detach specified server :%s" % self.servers[1]) - ret, out, _ = peer_detach(self.mnode, self.servers[1]) + g.log.info("Start detach specified server :%s", self.servers[1]) + ret, _, _ = peer_detach(self.mnode, self.servers[1]) self.assertEqual(ret, 0, "Failed to detach server :%s" % self.servers[1]) # Detached server detaching again, Expected to fail detach g.log.info("Start detached server detaching " - "again : %s" % self.servers[1]) - ret, out, _ = peer_detach(self.mnode, self.servers[1]) + "again : %s", self.servers[1]) + ret, _, _ = peer_detach(self.mnode, self.servers[1]) self.assertNotEqual(ret, 0, "Detach server should " "fail :%s" % self.servers[1]) # Probing detached server - g.log.info("Start probing detached server : %s" % self.servers[1]) + g.log.info("Start probing detached server : %s", self.servers[1]) ret = peer_probe_servers(self.mnode, self.servers[1]) self.assertTrue(ret, "Peer probe failed from %s to other " "server : %s" % (self.mnode, self.servers[1])) # Detach invalid host - g.log.info("Start detaching invalid host :%s " % self.invalid_ip) - ret, out, _ = peer_detach(self.mnode, self.invalid_ip) + g.log.info("Start detaching invalid host :%s ", self.invalid_ip) + ret, _, _ = peer_detach(self.mnode, self.invalid_ip) self.assertNotEqual(ret, 0, "Detach invalid host should " "fail :%s" % self.invalid_ip) # Detach non exist host - g.log.info("Start detaching non exist host : %s" % self.non_exist_host) - ret, out, _ = peer_detach(self.mnode, self.non_exist_host) + g.log.info("Start detaching non exist host : %s", self.non_exist_host) + ret, _, _ = peer_detach(self.mnode, self.non_exist_host) self.assertNotEqual(ret, 0, "Detach non existing host " "should fail :%s" % self.non_exist_host) @@ -107,14 +110,14 @@ class PeerDetachVerification(GlusterBaseClass): "successfully") # Creating Volume - g.log.info("Started creating volume: %s" % self.volname) + g.log.info("Started creating volume: %s", self.volname) ret = self.setup_volume() self.assertTrue(ret, "Volume creation failed: %s" % self.volname) # Peer detach one node which contains the bricks of the volume created g.log.info("Start detaching server %s which is hosting " - "bricks of a volume" % self.servers[1]) - ret, out, err = peer_detach(self.mnode, self.servers[1]) + "bricks of a volume", self.servers[1]) + ret, _, err = peer_detach(self.mnode, 
self.servers[1]) self.assertNotEqual(ret, 0, "detach server should fail: %s" % self.servers[1]) msg = ('peer detach: failed: Brick(s) with the peer ' + @@ -124,8 +127,8 @@ class PeerDetachVerification(GlusterBaseClass): # Peer detach force a node which is hosting bricks of a volume g.log.info("start detaching server %s with force option " - "which is hosting bricks of a volume" % self.servers[1]) - ret, out, err = peer_detach(self.mnode, self.servers[1], force=True) + "which is hosting bricks of a volume", self.servers[1]) + ret, _, err = peer_detach(self.mnode, self.servers[1], force=True) self.assertNotEqual(ret, 0, "detach server should fail with force " "option : %s" % self.servers[1]) msg = ('peer detach: failed: Brick(s) with the peer ' + diff --git a/tests/functional/glusterd/test_probe_glusterd.py b/tests/functional/glusterd/test_probe_glusterd.py index 0b035c933..d14991dbd 100644 --- a/tests/functional/glusterd/test_probe_glusterd.py +++ b/tests/functional/glusterd/test_probe_glusterd.py @@ -29,7 +29,7 @@ class PeerProbeInvalidIpNonExistingHost(GlusterBaseClass): @classmethod def setUpClass(cls): GlusterBaseClass.setUpClass.im_func(cls) - g.log.info("Starting %s " % cls.__name__) + g.log.info("Starting %s ", cls.__name__) def setUp(self): """ @@ -57,7 +57,7 @@ class PeerProbeInvalidIpNonExistingHost(GlusterBaseClass): ''' ret, test_timestamp, _ = g.run_local('date +%s') test_timestamp = test_timestamp.strip() - g.log.info("Running Test : %s" % self.id()) + g.log.info("Running Test : %s", self.id()) # Assigning non existing ip to variable self.non_exist_ip = '256.256.256.256' @@ -70,21 +70,21 @@ class PeerProbeInvalidIpNonExistingHost(GlusterBaseClass): # Peer probe checks for non existing host g.log.info("peer probe checking for non existing host") - ret, out, msg = peer_probe(self.mnode, self.non_exist_host) + ret, _, _ = peer_probe(self.mnode, self.non_exist_host) self.assertNotEqual(ret, 0, "peer probe should fail for " "non existhost: %s" % self.non_exist_host) g.log.info("peer probe failed for non existing host") # Peer probe checks for invalid ip g.log.info("peer probe checking for invalid ip") - ret, out, msg = peer_probe(self.mnode, self.invalid_ip) + ret, _, _ = peer_probe(self.mnode, self.invalid_ip) self.assertNotEqual(ret, 0, "peer probe shouldfail for " "invalid ip: %s" % self.invalid_ip) g.log.info("peer probe failed for invalid_ip") # peer probe checks for non existing ip g.log.info("peer probe checking for non existing ip") - ret, out, msg = peer_probe(self.mnode, self.non_exist_ip) + ret, _, _ = peer_probe(self.mnode, self.non_exist_ip) self.assertNotEqual(ret, 0, "peer probe should fail for non exist " "ip :%s" % self.non_exist_ip) g.log.info("peer probe failed for non existing ip") diff --git a/tests/functional/glusterd/test_quorum_related_messages_in_syslog.py b/tests/functional/glusterd/test_quorum_related_messages_in_syslog.py deleted file mode 100644 index 2b21a2a29..000000000 --- a/tests/functional/glusterd/test_quorum_related_messages_in_syslog.py +++ /dev/null @@ -1,292 +0,0 @@ -# Copyright (C) 2017-2018 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
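Editor's note: the peer-management tests above all rely on peer_probe and peer_detach returning a non-zero exit status for invalid or unknown hosts. A minimal sketch of those negative checks, using the peer_ops helpers exercised above; the management node and the bogus addresses are placeholders.

# Minimal sketch of the negative peer-management checks above.
from glustolibs.gluster.peer_ops import peer_probe, peer_detach

mnode = "server0.example.com"             # hypothetical management node

# Probing an address that cannot resolve should return a non-zero code.
ret, _, _ = peer_probe(mnode, "nonexistenthost")
assert ret != 0, "peer probe unexpectedly succeeded for a bogus host"

# Detaching a host that was never probed should also fail cleanly.
ret, _, _ = peer_detach(mnode, "10.11.a")
assert ret != 0, "peer detach unexpectedly succeeded for an invalid ip"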
See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -""" Description: - Test Cases in this module related to quorum - related messages in syslog, when there are more volumes. -""" - -from glusto.core import Glusto as g -from glustolibs.gluster.exceptions import ExecutionError -from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on -from glustolibs.gluster.volume_libs import (setup_volume, cleanup_volume) -from glustolibs.gluster.volume_ops import set_volume_options -from glustolibs.gluster.gluster_init import (stop_glusterd, start_glusterd, - is_glusterd_running) -from time import sleep -import re - - -@runs_on([['distributed', 'replicated', 'distributed-replicated', - 'dispersed', 'distributed-dispersed'], ['glusterfs']]) -class TestQuorumRelatedMessagesInSyslog(GlusterBaseClass): - @classmethod - def setUpClass(cls): - GlusterBaseClass.setUpClass.im_func(cls) - - # checking for peer status from every node - ret = cls.validate_peers_are_connected() - if not ret: - raise ExecutionError("Servers are not in peer probed state") - - def setUp(self): - """ - setUp method for every test - """ - # calling GlusterBaseClass setUp - GlusterBaseClass.setUp.im_func(self) - self.volume_list = [] - # create a volume - ret = setup_volume(self.mnode, self.all_servers_info, - self.volume) - self.volume_list.append(self.volname) - if not ret: - raise ExecutionError("Volume creation failed: %s" % self.volname) - - # Creating another volume - second_volume = "second_volume" - self.volume['name'] = second_volume - ret = setup_volume(self.mnode, self.all_servers_info, - self.volume) - self.volume_list.append(second_volume) - if not ret: - raise ExecutionError("Volume creation failed: %s" % second_volume) - - def tearDown(self): - """ - tearDown for every test - """ - if not self.glusterd_service: - ret = start_glusterd(self.servers[1]) - if not ret: - raise ExecutionError("Failed to start glusterd services " - "for : %s" % self.servers[1]) - - # Checking glusterd service running or not - ret = is_glusterd_running(self.servers[1]) - if (ret == 0): - g.log.info("glusterd running on :%s" % self.servers[1]) - else: - raise ExecutionError("glusterd not running on :%s" - % self.servers[1]) - - # In this test case performing quorum operations, - # deleting volumes immediately after glusterd services start, volume - # deletions are failing with quorum not met, - # that's the reason verifying peers are connected or not before - # deleting volumes - peers_not_connected = True - count = 0 - while(count < 10): - ret = self.validate_peers_are_connected() - if ret: - peers_not_connected = False - break - count += 1 - sleep(5) - if (peers_not_connected): - raise ExecutionError("Servers are not in peer probed state") - - # stopping the volume and Cleaning up the volume - for volume in self.volume_list: - ret = cleanup_volume(self.mnode, volume) - if not ret: - raise ExecutionError("Failed to Cleanup the " - "Volume %s" % volume) - g.log.info("Volume deleted successfully : %s" % volume) - - # Calling GlusterBaseClass tearDown - GlusterBaseClass.tearDown.im_func(self) - - def test_quorum_messages_in_syslog_with_more_volumes(self): - """ - create two volumes - Set server quorum to both the volumes - set server quorum ratio 90% - stop glusterd service any one of the node - quorum regain 
message should be recorded with message id - 106002 - for both the volumes in /var/log/messages and - /var/log/glusterfs/glusterd.log - start the glusterd service of same node - quorum regain message should be recorded with message id - 106003 - for both the volumes in /var/log/messages and - /var/log/glusterfs/glusterd.log - """ - - self.log_messages = "/var/log/messages" - self.log_glusterd = "/var/log/glusterfs/glusterd.log" - - # Enabling server quorum all volumes - self.quorum_options = {'cluster.server-quorum-type': 'server'} - for volume in self.volume_list: - ret = set_volume_options(self.mnode, volume, self.quorum_options) - self.assertTrue(ret, "gluster volume set %s cluster.server" - "-quorum-type server Failed" % self.volname) - g.log.info("gluster volume set %s cluster.server-quorum" - "-type server enabled successfully" % self.volname) - - # Setting Quorum ratio in percentage - self.quorum_perecent = {'cluster.server-quorum-ratio': '91%'} - ret = set_volume_options(self.mnode, 'all', self.quorum_perecent) - self.assertTrue(ret, "gluster volume set all cluster.server-quorum-" - "ratio percentage Failed :%s" % self.servers) - g.log.info("gluster volume set all cluster.server-quorum-ratio 91 " - "percentage enabled successfully :%s" % self.servers) - - # counting quorum regain messages-id '106002' in /var/log/messages - # file, before glusterd services stop - cmd_messages = ' '.join(['grep -o', '106002', self.log_messages, - '| wc -l']) - ret, before_glusterd_stop_msgid_count, _ = g.run(self.mnode, - cmd_messages) - self.assertEqual(ret, 0, "Failed to grep quorum regain message-id " - "106002 count in : %s" % self.log_messages) - - # counting quorum regain messages-id '106002' in - # /var/log/glusterfs/glusterd.log file, before glusterd services stop - cmd_glusterd = ' '.join(['grep -o', '106002', self.log_glusterd, - '| wc -l']) - ret, before_glusterd_stop_glusterd_id_count, _ = g.run(self.mnode, - cmd_glusterd) - self.assertEqual(ret, 0, "Failed to grep quorum regain message-id " - "106002 count in :%s" % self.log_glusterd) - - # Stopping glusterd services - ret = stop_glusterd(self.servers[1]) - self.glusterd_service = False - self.assertTrue(ret, "Failed stop glusterd services : %s" - % self.servers[1]) - g.log.info("Stopped glusterd services successfully on: %s" - % self.servers[1]) - - # checking glusterd service stopped or not - ret = is_glusterd_running(self.servers[1]) - self.assertEqual(ret, 1, "glusterd service should be stopped") - - # counting quorum regain messages-id '106002' in /var/log/messages file - # after glusterd services stop. 
- count = 0 - msg_count = False - expected_msg_id_count = int(before_glusterd_stop_msgid_count) + 2 - while (count <= 10): - ret, after_glusterd_stop_msgid_count, _ = g.run(self.mnode, - cmd_messages) - if(re.search(r'\b' + str(expected_msg_id_count) + r'\b', - after_glusterd_stop_msgid_count)): - msg_count = True - break - sleep(5) - count += 1 - self.assertTrue(msg_count, "Failed to grep quorum regain message-id " - "106002 count in :%s" % self.log_messages) - - # counting quorum regain messages-id '106002' in - # /var/log/glusterfs/glusterd.log file after glusterd services stop - ret, after_glusterd_stop_glusterd_id_count, _ = g.run(self.mnode, - cmd_glusterd) - self.assertEqual(ret, 0, "Failed to grep quorum regain message-id " - "106002 count in :%s" % self.log_glusterd) - - # Finding quorum regain message-id count difference between before - # and after glusterd services stop in /var/log/messages - count_diff = (int(after_glusterd_stop_msgid_count) - - int(before_glusterd_stop_msgid_count)) - - self.assertEqual(count_diff, 2, "Failed to record regain messages " - "in : %s" % self.log_messages) - g.log.info("regain messages recorded for two volumes " - "successfully after glusterd services stop " - ":%s" % self.log_messages) - - # Finding quorum regain message-id count difference between before - # and after glusterd services stop in /var/log/glusterfs/glusterd.log - count_diff = (int(after_glusterd_stop_glusterd_id_count) - - int(before_glusterd_stop_glusterd_id_count)) - self.assertEqual(count_diff, 2, "Failed to record regain messages in " - ": %s" % self.log_glusterd) - g.log.info("regain messages recorded for two volumes successfully " - "after glusterd services stop :%s" % self.log_glusterd) - - # counting quorum messages-id '106003' in a /var/log/messages file - # before glusterd services start - cmd_messages = ' '.join(['grep -o', '106003', self.log_messages, - '| wc -l']) - ret, before_glusterd_start_msgid_count, _ = g.run(self.mnode, - cmd_messages) - self.assertEqual(ret, 0, "Failed to grep quorum message-id 106003 " - "count in :%s" % self.log_messages) - - # counting quorum regain messages-id '106003' in - # /var/log/glusterfs/glusterd.log file before glusterd services start - cmd_glusterd = ' '.join(['grep -o', '106003', self.log_glusterd, - '| wc -l']) - ret, before_glusterd_start_glusterd_id_count, _ = g.run(self.mnode, - cmd_glusterd) - self.assertEqual(ret, 0, "Failed to grep quorum regain message-id " - "106003 count in :%s" % self.log_glusterd) - - # Startin glusterd services - ret = start_glusterd(self.servers[1]) - self.glusterd_service = True - self.assertTrue(ret, "Failed to start glusterd " - "services: %s" % self.servers[1]) - - # Checking glusterd service running or not - ret = is_glusterd_running(self.servers[1]) - self.assertEqual(ret, 0, "glusterd service should be running") - - # counting quorum messages-id '106003' in a file in a - # /var/log/messages file after glusterd service start - count = 0 - expected_msg_id_count = int(before_glusterd_start_msgid_count) + 2 - msg_count = False - while(count <= 10): - ret, after_glusterd_start_msgid_count, _ = g.run(self.mnode, - cmd_messages) - if (re.search(r'\b' + str(expected_msg_id_count) + r'\b', - after_glusterd_start_msgid_count)): - msg_count = True - break - sleep(5) - count += 1 - - self.assertTrue(msg_count, "Failed to grep quorum message-id 106003 " - "count in :%s" % self.log_messages) - - # counting quorum regain messages-id '106003' in - # /var/log/glusterfs/glusterd.log file after glusterd 
services start - ret, after_glusterd_start_glusterd_id_count, _ = g.run(self.mnode, - cmd_glusterd) - self.assertEqual(ret, 0, "Failed to grep quorum regain message-id " - "106003 count in :%s" % self.log_glusterd) - - # Finding quorum regain message-id count difference between before - # and after glusterd services start in /var/log/messages - count_diff = (int(after_glusterd_start_msgid_count) - - int(before_glusterd_start_msgid_count)) - self.assertEqual(count_diff, 2, "Failed to record regain " - "messages in :%s" % self.log_messages) - g.log.info("regain messages recorded for two volumes successfully " - "after glusterd services start in :%s" % self.log_messages) - - # Finding quorum regain message-id count difference between before - # and after glusterd services start in /var/log/glusterfs/glusterd.log - count_diff = (int(after_glusterd_start_glusterd_id_count) - - int(before_glusterd_start_glusterd_id_count)) - self.assertEqual(count_diff, 2, "Failed to record regain messages " - "in : %s" % self.log_glusterd) - g.log.info("regain messages recorded for two volumes successfully " - "after glusterd services start :%s" % self.log_glusterd) diff --git a/tests/functional/glusterd/test_quorum_syslog.py b/tests/functional/glusterd/test_quorum_syslog.py new file mode 100644 index 000000000..cefa328b8 --- /dev/null +++ b/tests/functional/glusterd/test_quorum_syslog.py @@ -0,0 +1,294 @@ +# Copyright (C) 2017-2018 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +from time import sleep +import re + +from glusto.core import Glusto as g +from glustolibs.gluster.exceptions import ExecutionError +from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on +from glustolibs.gluster.volume_libs import (setup_volume, cleanup_volume) +from glustolibs.gluster.volume_ops import set_volume_options +from glustolibs.gluster.gluster_init import (stop_glusterd, start_glusterd, + is_glusterd_running) + + +@runs_on([['distributed', 'replicated', 'distributed-replicated', + 'dispersed', 'distributed-dispersed'], ['glusterfs']]) +class TestQuorumRelatedMessagesInSyslog(GlusterBaseClass): + """ + Test Cases in this module related to quorum + related messages in syslog, when there are more volumes. 
+ """ + @classmethod + def setUpClass(cls): + GlusterBaseClass.setUpClass.im_func(cls) + + # checking for peer status from every node + ret = cls.validate_peers_are_connected() + if not ret: + raise ExecutionError("Servers are not in peer probed state") + + def setUp(self): + """ + setUp method for every test + """ + # calling GlusterBaseClass setUp + GlusterBaseClass.setUp.im_func(self) + self.volume_list = [] + # create a volume + ret = setup_volume(self.mnode, self.all_servers_info, + self.volume) + self.volume_list.append(self.volname) + if not ret: + raise ExecutionError("Volume creation failed: %s" % self.volname) + + # Creating another volume + second_volume = "second_volume" + self.volume['name'] = second_volume + ret = setup_volume(self.mnode, self.all_servers_info, + self.volume) + self.volume_list.append(second_volume) + if not ret: + raise ExecutionError("Volume creation failed: %s" % second_volume) + + def tearDown(self): + """ + tearDown for every test + """ + if not self.glusterd_service: + ret = start_glusterd(self.servers[1]) + if not ret: + raise ExecutionError("Failed to start glusterd services " + "for : %s" % self.servers[1]) + + # Checking glusterd service running or not + ret = is_glusterd_running(self.servers[1]) + if ret == 0: + g.log.info("glusterd running on :%s", self.servers[1]) + else: + raise ExecutionError("glusterd not running on :%s" + % self.servers[1]) + + # In this test case performing quorum operations, + # deleting volumes immediately after glusterd services start, volume + # deletions are failing with quorum not met, + # that's the reason verifying peers are connected or not before + # deleting volumes + peers_not_connected = True + count = 0 + while count < 10: + ret = self.validate_peers_are_connected() + if ret: + peers_not_connected = False + break + count += 1 + sleep(5) + if peers_not_connected: + raise ExecutionError("Servers are not in peer probed state") + + # stopping the volume and Cleaning up the volume + for volume in self.volume_list: + ret = cleanup_volume(self.mnode, volume) + if not ret: + raise ExecutionError("Failed to Cleanup the " + "Volume %s" % volume) + g.log.info("Volume deleted successfully : %s", volume) + + # Calling GlusterBaseClass tearDown + GlusterBaseClass.tearDown.im_func(self) + + def test_quorum_messages_in_syslog_with_more_volumes(self): + """ + create two volumes + Set server quorum to both the volumes + set server quorum ratio 90% + stop glusterd service any one of the node + quorum regain message should be recorded with message id - 106002 + for both the volumes in /var/log/messages and + /var/log/glusterfs/glusterd.log + start the glusterd service of same node + quorum regain message should be recorded with message id - 106003 + for both the volumes in /var/log/messages and + /var/log/glusterfs/glusterd.log + """ + # pylint: disable=too-many-locals + # pylint: disable=too-many-statements + + self.log_messages = "/var/log/messages" + self.log_glusterd = "/var/log/glusterfs/glusterd.log" + + # Enabling server quorum all volumes + self.quorum_options = {'cluster.server-quorum-type': 'server'} + for volume in self.volume_list: + ret = set_volume_options(self.mnode, volume, self.quorum_options) + self.assertTrue(ret, "gluster volume set %s cluster.server" + "-quorum-type server Failed" % self.volname) + g.log.info("gluster volume set %s cluster.server-quorum" + "-type server enabled successfully", self.volname) + + # Setting Quorum ratio in percentage + self.quorum_perecent = {'cluster.server-quorum-ratio': '91%'} 
+ ret = set_volume_options(self.mnode, 'all', self.quorum_perecent) + self.assertTrue(ret, "gluster volume set all cluster.server-quorum-" + "ratio percentage Failed :%s" % self.servers) + g.log.info("gluster volume set all cluster.server-quorum-ratio 91 " + "percentage enabled successfully :%s", self.servers) + + # counting quorum regain messages-id '106002' in /var/log/messages + # file, before glusterd services stop + cmd_messages = ' '.join(['grep -o', '106002', self.log_messages, + '| wc -l']) + ret, before_glusterd_stop_msgid_count, _ = g.run(self.mnode, + cmd_messages) + self.assertEqual(ret, 0, "Failed to grep quorum regain message-id " + "106002 count in : %s" % self.log_messages) + + # counting quorum regain messages-id '106002' in + # /var/log/glusterfs/glusterd.log file, before glusterd services stop + cmd_glusterd = ' '.join(['grep -o', '106002', self.log_glusterd, + '| wc -l']) + ret, before_glusterd_stop_glusterd_id_count, _ = g.run(self.mnode, + cmd_glusterd) + self.assertEqual(ret, 0, "Failed to grep quorum regain message-id " + "106002 count in :%s" % self.log_glusterd) + + # Stopping glusterd services + ret = stop_glusterd(self.servers[1]) + self.glusterd_service = False + self.assertTrue(ret, "Failed stop glusterd services : %s" + % self.servers[1]) + g.log.info("Stopped glusterd services successfully on: %s", + self.servers[1]) + + # checking glusterd service stopped or not + ret = is_glusterd_running(self.servers[1]) + self.assertEqual(ret, 1, "glusterd service should be stopped") + + # counting quorum regain messages-id '106002' in /var/log/messages file + # after glusterd services stop. + count = 0 + msg_count = False + expected_msg_id_count = int(before_glusterd_stop_msgid_count) + 2 + while count <= 10: + ret, after_glusterd_stop_msgid_count, _ = g.run(self.mnode, + cmd_messages) + if(re.search(r'\b' + str(expected_msg_id_count) + r'\b', + after_glusterd_stop_msgid_count)): + msg_count = True + break + sleep(5) + count += 1 + self.assertTrue(msg_count, "Failed to grep quorum regain message-id " + "106002 count in :%s" % self.log_messages) + + # counting quorum regain messages-id '106002' in + # /var/log/glusterfs/glusterd.log file after glusterd services stop + ret, after_glusterd_stop_glusterd_id_count, _ = g.run(self.mnode, + cmd_glusterd) + self.assertEqual(ret, 0, "Failed to grep quorum regain message-id " + "106002 count in :%s" % self.log_glusterd) + + # Finding quorum regain message-id count difference between before + # and after glusterd services stop in /var/log/messages + count_diff = (int(after_glusterd_stop_msgid_count) - + int(before_glusterd_stop_msgid_count)) + + self.assertEqual(count_diff, 2, "Failed to record regain messages " + "in : %s" % self.log_messages) + g.log.info("regain messages recorded for two volumes " + "successfully after glusterd services stop " + ":%s", self.log_messages) + + # Finding quorum regain message-id count difference between before + # and after glusterd services stop in /var/log/glusterfs/glusterd.log + count_diff = (int(after_glusterd_stop_glusterd_id_count) - + int(before_glusterd_stop_glusterd_id_count)) + self.assertEqual(count_diff, 2, "Failed to record regain messages in " + ": %s" % self.log_glusterd) + g.log.info("regain messages recorded for two volumes successfully " + "after glusterd services stop :%s", self.log_glusterd) + + # counting quorum messages-id '106003' in a /var/log/messages file + # before glusterd services start + cmd_messages = ' '.join(['grep -o', '106003', self.log_messages, + '| wc -l']) + 
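Editor's note: the syslog checks above count occurrences of a quorum message id before and after stopping glusterd and expect the difference to be exactly two, one per volume. A condensed sketch of the counting step; the node name is a placeholder and the message id is the one used by the test.

# Condensed sketch of the grep-based message-id counting used above.
from glusto.core import Glusto as g

mnode = "server0.example.com"             # hypothetical management node
log_messages = "/var/log/messages"

# Count how many times message id 106002 appears in the syslog.
cmd = ' '.join(['grep -o', '106002', log_messages, '| wc -l'])
ret, count, _ = g.run(mnode, cmd)
assert ret == 0, "failed to grep %s" % log_messages
g.log.info("106002 occurrences in %s: %s", log_messages, count.strip())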
ret, before_glusterd_start_msgid_count, _ = g.run(self.mnode, + cmd_messages) + self.assertEqual(ret, 0, "Failed to grep quorum message-id 106003 " + "count in :%s" % self.log_messages) + + # counting quorum regain messages-id '106003' in + # /var/log/glusterfs/glusterd.log file before glusterd services start + cmd_glusterd = ' '.join(['grep -o', '106003', self.log_glusterd, + '| wc -l']) + ret, before_glusterd_start_glusterd_id_count, _ = g.run(self.mnode, + cmd_glusterd) + self.assertEqual(ret, 0, "Failed to grep quorum regain message-id " + "106003 count in :%s" % self.log_glusterd) + + # Startin glusterd services + ret = start_glusterd(self.servers[1]) + self.glusterd_service = True + self.assertTrue(ret, "Failed to start glusterd " + "services: %s" % self.servers[1]) + + # Checking glusterd service running or not + ret = is_glusterd_running(self.servers[1]) + self.assertEqual(ret, 0, "glusterd service should be running") + + # counting quorum messages-id '106003' in a file in a + # /var/log/messages file after glusterd service start + count = 0 + expected_msg_id_count = int(before_glusterd_start_msgid_count) + 2 + msg_count = False + while count <= 10: + ret, after_glusterd_start_msgid_count, _ = g.run(self.mnode, + cmd_messages) + if (re.search(r'\b' + str(expected_msg_id_count) + r'\b', + after_glusterd_start_msgid_count)): + msg_count = True + break + sleep(5) + count += 1 + + self.assertTrue(msg_count, "Failed to grep quorum message-id 106003 " + "count in :%s" % self.log_messages) + + # counting quorum regain messages-id '106003' in + # /var/log/glusterfs/glusterd.log file after glusterd services start + ret, after_glusterd_start_glusterd_id_count, _ = g.run(self.mnode, + cmd_glusterd) + self.assertEqual(ret, 0, "Failed to grep quorum regain message-id " + "106003 count in :%s" % self.log_glusterd) + + # Finding quorum regain message-id count difference between before + # and after glusterd services start in /var/log/messages + count_diff = (int(after_glusterd_start_msgid_count) - + int(before_glusterd_start_msgid_count)) + self.assertEqual(count_diff, 2, "Failed to record regain " + "messages in :%s" % self.log_messages) + g.log.info("regain messages recorded for two volumes successfully " + "after glusterd services start in :%s", self.log_messages) + + # Finding quorum regain message-id count difference between before + # and after glusterd services start in /var/log/glusterfs/glusterd.log + count_diff = (int(after_glusterd_start_glusterd_id_count) - + int(before_glusterd_start_glusterd_id_count)) + self.assertEqual(count_diff, 2, "Failed to record regain messages " + "in : %s" % self.log_glusterd) + g.log.info("regain messages recorded for two volumes successfully " + "after glusterd services start :%s", self.log_glusterd) diff --git a/tests/functional/glusterd/test_rebalance_new_node.py b/tests/functional/glusterd/test_rebalance_new_node.py new file mode 100644 index 000000000..a9cd0fea6 --- /dev/null +++ b/tests/functional/glusterd/test_rebalance_new_node.py @@ -0,0 +1,162 @@ +# Copyright (C) 2016-2017 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +from glusto.core import Glusto as g +from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on +from glustolibs.gluster.exceptions import ExecutionError +from glustolibs.gluster.volume_libs import (setup_volume, cleanup_volume) +from glustolibs.gluster.volume_ops import (get_volume_list) +from glustolibs.gluster.peer_ops import (peer_probe_servers, + peer_detach_servers, peer_probe) +from glustolibs.misc.misc_libs import upload_scripts +from glustolibs.gluster.lib_utils import form_bricks_list +from glustolibs.gluster.brick_ops import add_brick +from glustolibs.gluster.rebalance_ops import (rebalance_start, + get_rebalance_status) +from glustolibs.gluster.mount_ops import is_mounted + + +@runs_on([['distributed'], ['glusterfs']]) +class TestRebalanceStatus(GlusterBaseClass): + + def setUp(self): + + GlusterBaseClass.setUp.im_func(self) + + # check whether peers are in connected state + ret = self.validate_peers_are_connected() + if not ret: + raise ExecutionError("Peers are not in connected state") + + # detach all the nodes + ret = peer_detach_servers(self.mnode, self.servers) + if not ret: + raise ExecutionError("Peer detach failed to all the servers from " + "the node.") + g.log.info("Peer detach SUCCESSFUL.") + + # Uploading file_dir script in all client direcotries + g.log.info("Upload io scripts to clients %s for running IO on " + "mounts", self.clients) + script_local_path = ("/usr/share/glustolibs/io/scripts/" + "file_dir_ops.py") + self.script_upload_path = ("/usr/share/glustolibs/io/scripts/" + "file_dir_ops.py") + ret = upload_scripts(self.clients, script_local_path) + if not ret: + raise ExecutionError("Failed to upload IO scripts to clients %s" % + self.clients) + g.log.info("Successfully uploaded IO scripts to clients %s", + self.clients) + + def tearDown(self): + + # unmount the volume + ret = self.unmount_volume(self.mounts) + self.assertTrue(ret, "Volume unmount failed for %s" % self.volname) + + # get volumes list and clean up all the volumes + vol_list = get_volume_list(self.mnode) + if vol_list is None: + raise ExecutionError("Error while getting vol list") + else: + for volume in vol_list: + ret = cleanup_volume(self.mnode, volume) + if ret is True: + g.log.info("Volume deleted successfully : %s", volume) + else: + raise ExecutionError("Failed Cleanup the" + " Volume %s" % volume) + + # peer probe all the servers + ret = peer_probe_servers(self.mnode, self.servers) + if not ret: + raise ExecutionError("Peer probe failed to all the servers from " + "the node.") + + GlusterBaseClass.tearDown.im_func(self) + + @classmethod + def tearDownClass(cls): + + # Calling GlusterBaseClass tearDown + GlusterBaseClass.tearDownClass.im_func(cls) + + def test_rebalance_status_from_newly_probed_node(self): + + # Peer probe first 3 servers + servers_info_from_three_nodes = {} + for server in self.servers[0:3]: + servers_info_from_three_nodes[ + server] = self.all_servers_info[server] + # Peer probe the first 3 servers + ret, _, _ = peer_probe(self.mnode, server) + self.assertEqual(ret, 0, "Peer probe failed to %s" % server) + + self.volume['servers'] = self.servers[0:3] + # create a volume using the first 3 nodes + ret = setup_volume(self.mnode, servers_info_from_three_nodes, + self.volume, 
force=True) + self.assertTrue(ret, "Failed to create" + "and start volume %s" % self.volname) + + # Mounting a volume + ret = self.mount_volume(self.mounts) + self.assertTrue(ret, "Volume mount failed for %s" % self.volname) + + # Checking volume mounted or not + ret = is_mounted(self.volname, self.mounts[0].mountpoint, self.mnode, + self.mounts[0].client_system, self.mount_type) + self.assertTrue(ret, "Volume not mounted on mount point: %s" + % self.mounts[0].mountpoint) + g.log.info("Volume %s mounted on %s", self.volname, + self.mounts[0].mountpoint) + + # run IOs + g.log.info("Starting IO on all mounts...") + self.counter = 1 + for mount_obj in self.mounts: + g.log.info("Starting IO on %s:%s", mount_obj.client_system, + mount_obj.mountpoint) + cmd = ("python %s create_deep_dirs_with_files " + "--dirname-start-num %d " + "--dir-depth 10 " + "--dir-length 5 " + "--max-num-of-dirs 3 " + "--num-of-files 100 %s" % (self.script_upload_path, + self.counter, + mount_obj.mountpoint)) + ret = g.run(mount_obj.client_system, cmd) + self.assertEqual(ret, 0, "IO failed on %s" + % mount_obj.client_system) + self.counter = self.counter + 10 + + # add a brick to the volume and start rebalance + brick_to_add = form_bricks_list(self.mnode, self.volname, 1, + self.servers[0:3], + servers_info_from_three_nodes) + ret, _, _ = add_brick(self.mnode, self.volname, brick_to_add) + self.assertEqual(ret, 0, "Failed to add a brick to %s" % self.volname) + + ret, _, _ = rebalance_start(self.mnode, self.volname) + self.assertEqual(ret, 0, "Failed to start rebalance") + + # peer probe a new node from existing cluster + ret, _, _ = peer_probe(self.mnode, self.servers[3]) + self.assertEqual(ret, 0, "Peer probe failed") + + ret = get_rebalance_status(self.servers[3], self.volname) + self.assertIsNone(ret, "Failed to get rebalance status") diff --git a/tests/functional/glusterd/test_rebalance_status_from_new_node.py b/tests/functional/glusterd/test_rebalance_status_from_new_node.py deleted file mode 100644 index dd71bcc3d..000000000 --- a/tests/functional/glusterd/test_rebalance_status_from_new_node.py +++ /dev/null @@ -1,162 +0,0 @@ -# Copyright (C) 2016-2017 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
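One detail worth flagging in the IO loop of the new test_rebalance_new_node.py above (carried over unchanged from the removed copy below): the result of g.run() is assigned whole and compared to 0, while most other call sites in this patch unpack a (retcode, stdout, stderr) tuple. Assuming g.run() returns that same tuple here, the comparison can never equal 0, so the unpacked form is probably what was intended; a two-line sketch reusing the loop's own mount_obj and cmd names:

# inside the "for mount_obj in self.mounts:" loop shown above
ret, _, _ = g.run(mount_obj.client_system, cmd)
self.assertEqual(ret, 0, "IO failed on %s" % mount_obj.client_system)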
- -from glusto.core import Glusto as g -from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on -from glustolibs.gluster.exceptions import ExecutionError -from glustolibs.gluster.volume_libs import (setup_volume, cleanup_volume) -from glustolibs.gluster.volume_ops import (get_volume_list) -from glustolibs.gluster.peer_ops import (peer_probe_servers, - peer_detach_servers, peer_probe) -from glustolibs.misc.misc_libs import upload_scripts -from glustolibs.gluster.lib_utils import form_bricks_list -from glustolibs.gluster.brick_ops import add_brick -from glustolibs.gluster.rebalance_ops import (rebalance_start, - get_rebalance_status) -from glustolibs.gluster.mount_ops import is_mounted - - -@runs_on([['distributed'], ['glusterfs']]) -class TestRebalanceStatus(GlusterBaseClass): - - def setUp(self): - - GlusterBaseClass.setUp.im_func(self) - - # check whether peers are in connected state - ret = self.validate_peers_are_connected() - if not ret: - raise ExecutionError("Peers are not in connected state") - - # detach all the nodes - ret = peer_detach_servers(self.mnode, self.servers) - if not ret: - raise ExecutionError("Peer detach failed to all the servers from " - "the node.") - g.log.info("Peer detach SUCCESSFUL.") - - # Uploading file_dir script in all client direcotries - g.log.info("Upload io scripts to clients %s for running IO on " - "mounts", self.clients) - script_local_path = ("/usr/share/glustolibs/io/scripts/" - "file_dir_ops.py") - self.script_upload_path = ("/usr/share/glustolibs/io/scripts/" - "file_dir_ops.py") - ret = upload_scripts(self.clients, script_local_path) - if not ret: - raise ExecutionError("Failed to upload IO scripts to clients %s", - self.clients) - g.log.info("Successfully uploaded IO scripts to clients %s", - self.clients) - - def tearDown(self): - - # unmount the volume - ret = self.unmount_volume(self.mounts) - self.assertTrue(ret, "Volume unmount failed for %s" % self.volname) - - # get volumes list and clean up all the volumes - vol_list = get_volume_list(self.mnode) - if vol_list is None: - raise ExecutionError("Error while getting vol list") - else: - for volume in vol_list: - ret = cleanup_volume(self.mnode, volume) - if ret is True: - g.log.info("Volume deleted successfully : %s" % volume) - else: - raise ExecutionError("Failed Cleanup the" - " Volume %s" % volume) - - # peer probe all the servers - ret = peer_probe_servers(self.mnode, self.servers) - if not ret: - raise ExecutionError("Peer probe failed to all the servers from " - "the node.") - - GlusterBaseClass.tearDown.im_func(self) - - @classmethod - def tearDownClass(cls): - - # Calling GlusterBaseClass tearDown - GlusterBaseClass.tearDownClass.im_func(cls) - - def test_rebalance_status_from_newly_probed_node(self): - - # Peer probe first 3 servers - servers_info_from_three_nodes = {} - for server in self.servers[0:3]: - servers_info_from_three_nodes[ - server] = self.all_servers_info[server] - # Peer probe the first 3 servers - ret, _, _ = peer_probe(self.mnode, server) - self.assertEqual(ret, 0, "Peer probe failed to %s" % server) - - self.volume['servers'] = self.servers[0:3] - # create a volume using the first 3 nodes - ret = setup_volume(self.mnode, servers_info_from_three_nodes, - self.volume, force=True) - self.assertTrue(ret, "Failed to create" - "and start volume %s" % self.volname) - - # Mounting a volume - ret = self.mount_volume(self.mounts) - self.assertTrue(ret, "Volume mount failed for %s" % self.volname) - - # Checking volume mounted or not - ret = 
is_mounted(self.volname, self.mounts[0].mountpoint, self.mnode, - self.mounts[0].client_system, self.mount_type) - self.assertTrue(ret, "Volume not mounted on mount point: %s" - % self.mounts[0].mountpoint) - g.log.info("Volume %s mounted on %s" % (self.volname, - self.mounts[0].mountpoint)) - - # run IOs - g.log.info("Starting IO on all mounts...") - self.counter = 1 - for mount_obj in self.mounts: - g.log.info("Starting IO on %s:%s", mount_obj.client_system, - mount_obj.mountpoint) - cmd = ("python %s create_deep_dirs_with_files " - "--dirname-start-num %d " - "--dir-depth 10 " - "--dir-length 5 " - "--max-num-of-dirs 3 " - "--num-of-files 100 %s" % (self.script_upload_path, - self.counter, - mount_obj.mountpoint)) - ret = g.run(mount_obj.client_system, cmd) - self.assertEqual(ret, 0, "IO failed on %s" - % mount_obj.client_system) - self.counter = self.counter + 10 - - # add a brick to the volume and start rebalance - brick_to_add = form_bricks_list(self.mnode, self.volname, 1, - self.servers[0:3], - servers_info_from_three_nodes) - ret, out, err = add_brick(self.mnode, self.volname, brick_to_add) - self.assertEqual(ret, 0, "Failed to add a brick to %s" % self.volname) - - ret, _, _ = rebalance_start(self.mnode, self.volname) - self.assertEqual(ret, 0, "Failed to start rebalance") - - # peer probe a new node from existing cluster - ret, _, _ = peer_probe(self.mnode, self.servers[3]) - self.assertEqual(ret, 0, "Peer probe failed") - - ret = get_rebalance_status(self.servers[3], self.volname) - self.assertIsNone(ret, "Failed to get rebalance status") diff --git a/tests/functional/glusterd/test_volume_create.py b/tests/functional/glusterd/test_volume_create.py index ad4997925..3a181597b 100644 --- a/tests/functional/glusterd/test_volume_create.py +++ b/tests/functional/glusterd/test_volume_create.py @@ -14,6 +14,7 @@ # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +import random from glusto.core import Glusto as g from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on from glustolibs.gluster.exceptions import ExecutionError @@ -28,11 +29,13 @@ from glustolibs.gluster.peer_ops import (peer_detach_servers, peer_probe, peer_detach) from glustolibs.gluster.lib_utils import form_bricks_list from glustolibs.gluster.gluster_init import start_glusterd, stop_glusterd -import random @runs_on([['distributed'], ['glusterfs']]) class TestVolumeCreate(GlusterBaseClass): + ''' + Test glusterd behavior with the gluster volume create command + ''' @classmethod def setUpClass(cls): @@ -68,12 +71,19 @@ class TestVolumeCreate(GlusterBaseClass): ret = cleanup_volume(self.mnode, volume) if not ret: raise ExecutionError("Unable to delete volume % s" % volume) - g.log.info("Volume deleted successfully : %s" % volume) + g.log.info("Volume deleted successfully : %s", volume) GlusterBaseClass.tearDown.im_func(self) def test_volume_create(self): - + ''' + In this test case, volume create operations such as creating volume + with non existing brick path, already used brick, already existing + volume name, bring the bricks to online with volume start force, + creating a volume with bricks in another cluster, creating a volume + when one of the brick node is down are validated. 
+ ''' + # pylint: disable=too-many-statements # create and start a volume self.volume['name'] = "first_volume" self.volname = "first_volume" @@ -157,15 +167,15 @@ class TestVolumeCreate(GlusterBaseClass): ret, _, _ = peer_probe(self.servers[0], self.servers[1]) self.assertEqual(ret, 0, "Peer probe from %s to %s is failed" % (self.servers[0], self.servers[1])) - g.log.info("Peer probe is success from %s to %s" - % (self.servers[0], self.servers[1])) + g.log.info("Peer probe is success from %s to %s", + self.servers[0], self.servers[1]) # form cluster 2 ret, _, _ = peer_probe(self.servers[2], self.servers[3]) self.assertEqual(ret, 0, "Peer probe from %s to %s is failed" % (self.servers[2], self.servers[3])) - g.log.info("Peer probe is success from %s to %s" - % (self.servers[2], self.servers[3])) + g.log.info("Peer probe is success from %s to %s", + self.servers[2], self.servers[3]) # Creating a volume with bricks which are part of another # cluster should fail diff --git a/tests/functional/glusterd/test_volume_delete.py b/tests/functional/glusterd/test_volume_delete.py index 4aa6dca24..e207bb4b0 100644 --- a/tests/functional/glusterd/test_volume_delete.py +++ b/tests/functional/glusterd/test_volume_delete.py @@ -14,6 +14,8 @@ # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +import re +import random from glusto.core import Glusto as g from glustolibs.gluster.exceptions import ExecutionError from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on @@ -23,8 +25,6 @@ from glustolibs.gluster.volume_ops import (volume_stop) from glustolibs.gluster.brick_libs import get_all_bricks from glustolibs.gluster.gluster_init import stop_glusterd, start_glusterd from glustolibs.gluster.peer_ops import peer_probe_servers, is_peer_connected -import re -import random @runs_on([['distributed', 'replicated', 'distributed-replicated', 'dispersed', @@ -66,7 +66,7 @@ class TestVolumeDelete(GlusterBaseClass): ret = cleanup_volume(self.mnode, volume) if not ret: raise ExecutionError("Unable to delete volume % s" % volume) - g.log.info("Volume deleted successfully : %s" % volume) + g.log.info("Volume deleted successfully : %s", volume) GlusterBaseClass.tearDown.im_func(self) @@ -104,8 +104,8 @@ class TestVolumeDelete(GlusterBaseClass): self.assertEqual(ret, 0, "Volume stop failed") # try to delete the volume, it should fail - ret, out, err = g.run(self.mnode, "gluster volume delete %s " - "--mode=script" % self.volname) + ret, _, err = g.run(self.mnode, "gluster volume delete %s " + "--mode=script" % self.volname) self.assertNotEqual(ret, 0, "Volume delete succeeded when one of the" " brick node is down") if re.search(r'Some of the peers are down', err): diff --git a/tests/functional/glusterd/test_volume_get.py b/tests/functional/glusterd/test_volume_get.py index 75a155774..228b15209 100644 --- a/tests/functional/glusterd/test_volume_get.py +++ b/tests/functional/glusterd/test_volume_get.py @@ -14,8 +14,8 @@ # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -""" Description: - Test Cases in this module related to Gluster volume get functionality +""" +Test Cases in this module related to Gluster volume get functionality """ from glusto.core import Glusto as g @@ -85,6 +85,7 @@ class TestVolumeGet(GlusterBaseClass): gluster volume get all 12. 
Check for any cores in "cd /" """ + # pylint: disable=too-many-statements # time stamp of current test case ret, test_timestamp, _ = g.run_local('date +%s') @@ -92,8 +93,8 @@ class TestVolumeGet(GlusterBaseClass): # performing gluster volume get command for non exist volume io-cache self.non_exist_volume = "abc99" - ret, out, err = g.run(self.mnode, "gluster volume get %s io-cache" - % self.non_exist_volume) + ret, _, err = g.run(self.mnode, "gluster volume get %s io-cache" + % self.non_exist_volume) self.assertNotEqual(ret, 0, "gluster volume get command should fail " "for non existing volume with io-cache " "option :%s" % self.non_exist_volume) @@ -103,11 +104,11 @@ class TestVolumeGet(GlusterBaseClass): % self.non_exist_volume) g.log.info("gluster volume get command failed successfully for non " "existing volume with io-cache option" - ":%s" % self.non_exist_volume) + ":%s", self.non_exist_volume) # performing gluster volume get all command for non exist volume - ret, out, err = g.run(self.mnode, "gluster volume get " - "%s all" % self.non_exist_volume) + ret, _, err = g.run(self.mnode, "gluster volume get %s all" % + self.non_exist_volume) self.assertNotEqual(ret, 0, "gluster volume get command should fail " "for non existing volume %s with all " "option" % self.non_exist_volume) @@ -115,12 +116,12 @@ class TestVolumeGet(GlusterBaseClass): "volume with all option:%s" % self.non_exist_volume) g.log.info("gluster volume get command failed successfully for non " - "existing volume with all option :%s" - % self.non_exist_volume) + "existing volume with all option :%s", + self.non_exist_volume) # performing gluster volume get command for non exist volume - ret, out, err = g.run(self.mnode, "gluster volume get " - "%s" % self.non_exist_volume) + ret, _, err = g.run(self.mnode, "gluster volume get " + "%s" % self.non_exist_volume) self.assertNotEqual(ret, 0, "gluster volume get command should " "fail for non existing volume :%s" % self.non_exist_volume) @@ -128,10 +129,10 @@ class TestVolumeGet(GlusterBaseClass): self.assertIn(msg, err, "No proper error message for non existing " "volume :%s" % self.non_exist_volume) g.log.info("gluster volume get command failed successfully for non " - "existing volume :%s" % self.non_exist_volume) + "existing volume :%s", self.non_exist_volume) # performing gluster volume get command without any volume name given - ret, out, err = g.run(self.mnode, "gluster volume get") + ret, _, err = g.run(self.mnode, "gluster volume get") self.assertNotEqual(ret, 0, "gluster volume get command should fail") self.assertIn(msg, err, "No proper error message for gluster " "volume get command") @@ -139,7 +140,7 @@ class TestVolumeGet(GlusterBaseClass): # performing gluster volume get io-cache command # without any volume name given - ret, out, err = g.run(self.mnode, "gluster volume get io-cache") + ret, _, err = g.run(self.mnode, "gluster volume get io-cache") self.assertNotEqual(ret, 0, "gluster volume get io-cache command " "should fail") self.assertIn(msg, err, "No proper error message for gluster volume " @@ -147,8 +148,8 @@ class TestVolumeGet(GlusterBaseClass): g.log.info("gluster volume get io-cache command failed successfully") # gluster volume get volname with non existing option - ret, out, err = g.run(self.mnode, "gluster volume " - "get %s temp.key" % self.volname) + ret, _, err = g.run(self.mnode, "gluster volume " + "get %s temp.key" % self.volname) self.assertNotEqual(ret, 0, "gluster volume get command should fail " "for existing volume %s with non-existing 
" "option" % self.volname) @@ -157,8 +158,8 @@ class TestVolumeGet(GlusterBaseClass): "volume %s with non-existing option" % self.volname) g.log.info("gluster volume get command failed successfully for " - "existing volume %s with non existing option" - % self.volname) + "existing volume %s with non existing option", + self.volname) # perfroming gluster volume get volname all @@ -166,7 +167,7 @@ class TestVolumeGet(GlusterBaseClass): self.assertIsNotNone(ret, "gluster volume get %s all command " "failed" % self.volname) g.log.info("gluster volume get %s all command executed " - "successfully" % self.volname) + "successfully", self.volname) # performing gluster volume get volname io-cache ret = get_volume_options(self.mnode, self.volname, "io-cache") @@ -182,8 +183,8 @@ class TestVolumeGet(GlusterBaseClass): self.assertTrue(ret, "gluster volume set %s performance.low-prio-" "threads failed" % self.volname) g.log.info("gluster volume set %s " - "performance.low-prio-threads executed successfully" - % self.volname) + "performance.low-prio-threads executed successfully", + self.volname) # Performing gluster volume get all, checking low-prio threads value ret = get_volume_options(self.mnode, self.volname, "all") @@ -198,7 +199,7 @@ class TestVolumeGet(GlusterBaseClass): self.assertIsNotNone(ret, "gluster volume get %s all command " "failed" % self.volname) g.log.info("gluster volume get %s all command executed " - "successfully" % self.volname) + "successfully", self.volname) # Checking core file created or not in "/" directory ret = is_core_file_created(self.servers, test_timestamp) diff --git a/tests/functional/glusterd/test_volume_op.py b/tests/functional/glusterd/test_volume_op.py deleted file mode 100644 index 93851e011..000000000 --- a/tests/functional/glusterd/test_volume_op.py +++ /dev/null @@ -1,148 +0,0 @@ -# Copyright (C) 2016-2017 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
- -from glusto.core import Glusto as g -from glustolibs.gluster.exceptions import ExecutionError -from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on -from glustolibs.gluster.volume_ops import (volume_create, volume_start, - volume_stop, volume_delete, - get_volume_list, get_volume_info) -from glustolibs.gluster.volume_libs import (setup_volume, cleanup_volume) -from glustolibs.gluster.peer_ops import (peer_probe, peer_detach) -from glustolibs.gluster.lib_utils import form_bricks_list - - -@runs_on([['distributed', 'replicated', 'distributed-replicated', 'dispersed', - 'distributed-dispersed'], ['glusterfs']]) -class TestVolumeOperations(GlusterBaseClass): - - @classmethod - def setUpClass(cls): - - # Calling GlusterBaseClass setUpClass - GlusterBaseClass.setUpClass.im_func(cls) - - # check whether peers are in connected state - ret = cls.validate_peers_are_connected() - if not ret: - raise ExecutionError("Peers are not in connected state") - - def tearDown(self): - - vol_list = get_volume_list(self.mnode) - if vol_list is None: - raise ExecutionError("Failed to get the volume list") - - for volume in vol_list: - ret = cleanup_volume(self.mnode, volume) - if not ret: - raise ExecutionError("Unable to delete volume % s" % volume) - g.log.info("Volume deleted successfully : %s" % volume) - - GlusterBaseClass.tearDown.im_func(self) - - def test_volume_op(self): - - # Starting a non existing volume should fail - ret, _, _ = volume_start(self.mnode, "no_vol", force=True) - self.assertNotEqual(ret, 0, "Expected: It should fail to Start a non" - " existing volume. Actual: Successfully started " - "a non existing volume") - g.log.info("Starting a non existing volume is failed") - - # Stopping a non existing volume should fail - ret, _, _ = volume_stop(self.mnode, "no_vol", force=True) - self.assertNotEqual(ret, 0, "Expected: It should fail to stop " - "non-existing volume. Actual: Successfully " - "stopped a non existing volume") - g.log.info("Stopping a non existing volume is failed") - - # Deleting a non existing volume should fail - ret = volume_delete(self.mnode, "no_vol") - self.assertTrue(ret, "Expected: It should fail to delete a " - "non existing volume. 
Actual:Successfully deleted " - "a non existing volume") - g.log.info("Deleting a non existing volume is failed") - - # Detach a server and try to create volume with node - # which is not in cluster - ret, _, _ = peer_detach(self.mnode, self.servers[1]) - self.assertEqual(ret, 0, ("Peer detach is failed")) - g.log.info("Peer detach is successful") - - num_of_bricks = len(self.servers) - bricks_list = form_bricks_list(self.mnode, self.volname, num_of_bricks, - self.servers, self.all_servers_info) - - ret, _, _ = volume_create(self.mnode, self.volname, bricks_list) - self.assertNotEqual(ret, 0, "Successfully created volume with brick " - "from which is not a part of node") - g.log.info("Creating a volume with brick from node which is not part " - "of cluster is failed") - - # Peer probe the detached server - ret, _, _ = peer_probe(self.mnode, self.servers[1]) - self.assertEqual(ret, 0, ("Peer probe is failed")) - g.log.info("Peer probe is successful") - - # Create and start a volume - ret = setup_volume(self.mnode, self.all_servers_info, self.volume, - force=True) - self.assertTrue(ret, "Failed to create the volume") - g.log.info("Successfully created and started the volume") - - # Starting already started volume should fail - ret, _, _ = volume_start(self.mnode, self.volname) - self.assertNotEqual(ret, 0, "Expected: It should fail to start a " - "already started volume. Actual:Successfully" - " started a already started volume ") - g.log.info("Starting a already started volume is Failed.") - - # Deleting a volume without stopping should fail - ret = volume_delete(self.mnode, self.volname) - self.assertFalse(ret, ("Expected: It should fail to delete a volume" - " without stopping. Actual: Successfully " - "deleted a volume without stopping it")) - g.log.error("Failed to delete a volume without stopping it") - - # Stopping a volume should succeed - ret, _, _ = volume_stop(self.mnode, self.volname) - self.assertEqual(ret, 0, ("volume stop is failed")) - g.log.info("Volume stop is success") - - # Stopping a already stopped volume should fail - ret, _, _ = volume_stop(self.mnode, self.volname) - self.assertNotEqual(ret, 0, "Expected: It should fail to stop a " - "already stopped volume . Actual: Successfully" - "stopped a already stopped volume") - g.log.info("Volume stop is failed on already stopped volume") - - # Deleting a volume should succeed - ret = volume_delete(self.mnode, self.volname) - self.assertTrue(ret, ("Volume delete is failed")) - g.log.info("Volume delete is success") - - # Deleting a non existing volume should fail - ret = volume_delete(self.mnode, self.volname) - self.assertTrue(ret, "Expected: It should fail to delete a non " - "existing volume. Actual:Successfully deleted a " - "non existing volume") - g.log.info("Volume delete is failed for non existing volume") - - # Volume info command should succeed - ret = get_volume_info(self.mnode) - self.assertIsNotNone(ret, "volume info command failed") - g.log.info("Volume info command is success") diff --git a/tests/functional/glusterd/test_volume_operations.py b/tests/functional/glusterd/test_volume_operations.py index a8e75ad8b..fc8d8b0b6 100644 --- a/tests/functional/glusterd/test_volume_operations.py +++ b/tests/functional/glusterd/test_volume_operations.py @@ -14,27 +14,26 @@ # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
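The test_volume_operations.py hunk that follows moves import random, import re and import os from below the glustolibs imports to the top of the file, that is, standard-library imports first and project imports second. A minimal sketch of the resulting layout, using only modules the file already imports:

# standard library first
import random
import re
import os

# then glusto / glustolibs imports
from glusto.core import Glusto as g
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.volume_ops import volume_create, volume_start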
+import random +import re +import os + from glusto.core import Glusto as g from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on from glustolibs.gluster.volume_ops import (volume_create, volume_start, - get_volume_list) + get_volume_list, volume_stop, + volume_delete, get_volume_info) + from glustolibs.gluster.brick_libs import (are_bricks_online) -from glustolibs.gluster.volume_libs import cleanup_volume +from glustolibs.gluster.volume_libs import cleanup_volume, setup_volume +from glustolibs.gluster.peer_ops import (peer_probe, peer_detach) from glustolibs.gluster.lib_utils import form_bricks_list from glustolibs.gluster.exceptions import ExecutionError -import random -import re -import os @runs_on([['distributed'], ['glusterfs']]) class TestVolumeCreate(GlusterBaseClass): - @classmethod - def setUpClass(cls): - # Calling GlusterBaseClass setUpClass - GlusterBaseClass.setUpClass.im_func(cls) - def setUp(self): GlusterBaseClass.setUp.im_func(self) # check whether peers are in connected state @@ -52,17 +51,11 @@ class TestVolumeCreate(GlusterBaseClass): for volume in vol_list: ret = cleanup_volume(self.mnode, volume) if not ret: - raise ExecutionError("Unable to delete volume % s" % volume) - g.log.info("Volume deleted successfully : %s" % volume) + raise ExecutionError("Unable to delete volume %s" % volume) + g.log.info("Volume deleted successfully : %s", volume) GlusterBaseClass.tearDown.im_func(self) - @classmethod - def tearDownClass(cls): - - # Calling GlusterBaseClass tearDown - GlusterBaseClass.tearDownClass.im_func(cls) - def test_volume_start_force(self): # get the brick list and create a volume @@ -97,6 +90,7 @@ class TestVolumeCreate(GlusterBaseClass): g.log.info("Volume start force didn't bring the brick online") def test_volume_create_on_brick_root(self): + # pylint: disable=too-many-locals # try to create a volume on brick root path without using force and # with using force @@ -154,7 +148,7 @@ class TestVolumeCreate(GlusterBaseClass): ret, _, _ = g.run(server, cmd1) self.assertEqual(ret, 0, "Failed to delete the files") g.log.info("Successfully deleted the files") - ret, out, err = g.run(server, cmd2) + ret, out, _ = g.run(server, cmd2) if re.search("trusted.glusterfs.volume-id", out): ret, _, _ = g.run(server, cmd3) self.assertEqual(ret, 0, "Failed to delete the xattrs") @@ -167,3 +161,96 @@ class TestVolumeCreate(GlusterBaseClass): # creation of volume should succeed ret, _, _ = volume_create(self.mnode, self.volname, same_bricks_list) self.assertEqual(ret, 0, "Failed to create volume") + + def test_volume_op(self): + + # Starting a non existing volume should fail + ret, _, _ = volume_start(self.mnode, "no_vol", force=True) + self.assertNotEqual(ret, 0, "Expected: It should fail to Start a non" + " existing volume. Actual: Successfully started " + "a non existing volume") + g.log.info("Starting a non existing volume is failed") + + # Stopping a non existing volume should fail + ret, _, _ = volume_stop(self.mnode, "no_vol", force=True) + self.assertNotEqual(ret, 0, "Expected: It should fail to stop " + "non-existing volume. Actual: Successfully " + "stopped a non existing volume") + g.log.info("Stopping a non existing volume is failed") + + # Deleting a non existing volume should fail + ret = volume_delete(self.mnode, "no_vol") + self.assertTrue(ret, "Expected: It should fail to delete a " + "non existing volume. 
Actual:Successfully deleted " + "a non existing volume") + g.log.info("Deleting a non existing volume is failed") + + # Detach a server and try to create volume with node + # which is not in cluster + ret, _, _ = peer_detach(self.mnode, self.servers[1]) + self.assertEqual(ret, 0, ("Peer detach is failed")) + g.log.info("Peer detach is successful") + + num_of_bricks = len(self.servers) + bricks_list = form_bricks_list(self.mnode, self.volname, num_of_bricks, + self.servers, self.all_servers_info) + + ret, _, _ = volume_create(self.mnode, self.volname, bricks_list) + self.assertNotEqual(ret, 0, "Successfully created volume with brick " + "from which is not a part of node") + g.log.info("Creating a volume with brick from node which is not part " + "of cluster is failed") + + # Peer probe the detached server + ret, _, _ = peer_probe(self.mnode, self.servers[1]) + self.assertEqual(ret, 0, ("Peer probe is failed")) + g.log.info("Peer probe is successful") + + # Create and start a volume + ret = setup_volume(self.mnode, self.all_servers_info, self.volume, + force=True) + self.assertTrue(ret, "Failed to create the volume") + g.log.info("Successfully created and started the volume") + + # Starting already started volume should fail + ret, _, _ = volume_start(self.mnode, self.volname) + self.assertNotEqual(ret, 0, "Expected: It should fail to start a " + "already started volume. Actual:Successfully" + " started a already started volume ") + g.log.info("Starting a already started volume is Failed.") + + # Deleting a volume without stopping should fail + ret = volume_delete(self.mnode, self.volname) + self.assertFalse(ret, ("Expected: It should fail to delete a volume" + " without stopping. Actual: Successfully " + "deleted a volume without stopping it")) + g.log.error("Failed to delete a volume without stopping it") + + # Stopping a volume should succeed + ret, _, _ = volume_stop(self.mnode, self.volname) + self.assertEqual(ret, 0, ("volume stop is failed")) + g.log.info("Volume stop is success") + + # Stopping a already stopped volume should fail + ret, _, _ = volume_stop(self.mnode, self.volname) + self.assertNotEqual(ret, 0, "Expected: It should fail to stop a " + "already stopped volume . Actual: Successfully" + "stopped a already stopped volume") + g.log.info("Volume stop is failed on already stopped volume") + + # Deleting a volume should succeed + ret = volume_delete(self.mnode, self.volname) + self.assertTrue(ret, ("Volume delete is failed")) + g.log.info("Volume delete is success") + + # Deleting a non existing volume should fail + ret = volume_delete(self.mnode, self.volname) + self.assertTrue(ret, "Expected: It should fail to delete a non " + "existing volume. Actual:Successfully deleted a " + "non existing volume") + g.log.info("Volume delete is failed for non existing volume") + + # Volume info command should succeed + ret = get_volume_info(self.mnode) + self.assertIsNotNone(ret, "volume info command failed") + g.log.info("Volume info command is success") diff --git a/tests/functional/glusterd/test_volume_reset.py b/tests/functional/glusterd/test_volume_reset.py index 2bb8c4c24..f61fdaaba 100644 --- a/tests/functional/glusterd/test_volume_reset.py +++ b/tests/functional/glusterd/test_volume_reset.py @@ -15,13 +15,10 @@ # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
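A change that repeats through the test_volume_reset.py hunks below, and throughout this patch, is replacing tuple members that are never read with "_", which keeps behaviour identical while silencing the unused-variable warning. A before/after sketch, with mnode and volname as placeholders for self.mnode and self.volname:

# before: "out" (and sometimes "err") is bound but never used
ret, out, _ = g.run(mnode, "gluster volume reset " + volname)

# after: keep only the return code, discard stdout/stderr explicitly
ret, _, _ = g.run(mnode, "gluster volume reset " + volname)
if ret != 0:
    raise ExecutionError("Volume reset failed: %s" % volname)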
""" Description: - Test Cases in this module related to Glusterd volume reset validation - with bitd, scrub and snapd daemons running or not """ from glusto.core import Glusto as g from glustolibs.gluster.exceptions import ExecutionError from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on -from glustolibs.gluster.peer_ops import peer_probe_servers from glustolibs.gluster.volume_libs import cleanup_volume from glustolibs.gluster.bitrot_ops import (enable_bitrot, is_bitd_running, is_scrub_process_running) @@ -31,33 +28,20 @@ from glustolibs.gluster.uss_ops import enable_uss, is_snapd_running @runs_on([['distributed', 'replicated', 'distributed-replicated', 'dispersed', 'distributed-dispersed'], ['glusterfs']]) class GlusterdVolumeReset(GlusterBaseClass): + ''' + Test Cases in this module related to Glusterd volume reset validation + with bitd, scrub and snapd daemons running or not + ''' @classmethod def setUpClass(cls): GlusterBaseClass.setUpClass.im_func(cls) - g.log.info("Starting %s " % cls.__name__) - ''' - checking for peer status from every node, if peers are in not - connected state, performing peer probe. - ''' - ret = cls.validate_peers_are_connected() - if not ret: - ret = peer_probe_servers(cls.mnode, cls.servers) - if ret: - g.log.info("peers are connected successfully from %s to other \ - servers in severlist %s:" % (cls.mnode, cls.servers)) - else: - g.log.error("Peer probe failed from %s to other \ - servers in severlist %s:" % (cls.mnode, cls.servers)) - raise ExecutionError("Peer probe failed ") - else: - g.log.info("All server peers are already in connected state\ - %s:" % cls.servers) + g.log.info("Starting %s ", cls.__name__) # Creating Volume g.log.info("Started creating volume") ret = cls.setup_volume() if ret: - g.log.info("Volme created successfully : %s" % cls.volname) + g.log.info("Volme created successfully : %s", cls.volname) else: raise ExecutionError("Volume creation failed: %s" % cls.volname) @@ -71,9 +55,9 @@ class GlusterdVolumeReset(GlusterBaseClass): # command for volume reset g.log.info("started resetting volume") cmd = "gluster volume reset " + self.volname - ret, out, _ = g.run(self.mnode, cmd) - if (ret == 0): - g.log.info("volume restted successfully :%s" % self.volname) + ret, _, _ = g.run(self.mnode, cmd) + if ret == 0: + g.log.info("volume reset successfully :%s", self.volname) else: raise ExecutionError("Volume reset Failed :%s" % self.volname) @@ -89,7 +73,7 @@ class GlusterdVolumeReset(GlusterBaseClass): # stopping the volume and Cleaning up the volume ret = cleanup_volume(cls.mnode, cls.volname) if ret: - g.log.info("Volume deleted successfully : %s" % cls.volname) + g.log.info("Volume deleted successfully : %s", cls.volname) else: raise ExecutionError("Failed Cleanup the Volume %s" % cls.volname) @@ -103,52 +87,47 @@ class GlusterdVolumeReset(GlusterBaseClass): -> Eanble Uss on same volume -> Reset the volume with force -> Verify all the daemons(BitD, Scrub & Uss) are running or not - :return: ''' # enable bitrot and scrub on volume g.log.info("Enabling bitrot") - ret, out, _ = enable_bitrot(self.mnode, self.volname) - self.assertEqual(ret, 0, "Failed to enable bitrot on\ - volume: %s" % self.volname) - g.log.info("Bitd and scrub daemons enabled\ - successfully on volume :%s" % self.volname) + ret, _, _ = enable_bitrot(self.mnode, self.volname) + self.assertEqual(ret, 0, "Failed to enable bitrot on volume: %s" % + self.volname) + g.log.info("Bitd and scrub daemons enabled successfully on volume :%s", + self.volname) # 
enable uss on volume g.log.info("Enabling snaphot(uss)") - ret, out, _ = enable_uss(self.mnode, self.volname) - self.assertEqual(ret, 0, "Failed to enable uss on\ - volume: %s" % self.volname) - g.log.info("uss enabled successfully on volume :%s" % self.volname) + ret, _, _ = enable_uss(self.mnode, self.volname) + self.assertEqual(ret, 0, "Failed to enable uss on volume: %s" % + self.volname) + g.log.info("uss enabled successfully on volume :%s", self.volname) # Checks bitd, snapd, scrub daemons running or not g.log.info("checking snapshot, scrub and bitrot\ daemons running or not") for mnode in self.servers: ret = is_bitd_running(mnode, self.volname) - self.assertTrue(ret, "Bitrot Daemon\ - not running on %s server:" % mnode) + self.assertTrue(ret, "Bitrot Daemon not running on %s server:" + % mnode) ret = is_scrub_process_running(mnode, self.volname) - self.assertTrue(ret, "Scrub Daemon\ - not running on %s server:" % mnode) + self.assertTrue(ret, "Scrub Daemon not running on %s server:" + % mnode) ret = is_snapd_running(mnode, self.volname) - self.assertTrue(ret, "Snap Daemon\ - not running %s server:" % mnode) - g.log.info("bitd, scrub and snapd running\ - successflly on volume :%s" % self.volname) + self.assertTrue(ret, "Snap Daemon not running %s server:" % mnode) + g.log.info("bitd, scrub and snapd running successflly on volume :%s", + self.volname) # command for volume reset g.log.info("started resetting volume") cmd = "gluster volume reset " + self.volname - ret, out, _ = g.run(self.mnode, cmd) - self.assertEqual(ret, 0, "volume reset failed\ - for : %s" % self.volname) - g.log.info("volume resetted succefully :%s" % self.volname) + ret, _, _ = g.run(self.mnode, cmd) + self.assertEqual(ret, 0, "volume reset failed for : %s" % self.volname) + g.log.info("volume resetted succefully :%s", self.volname) - ''' - After volume reset snap daemon will not be running, - bitd and scrub deamons will be in running state. - ''' + # After volume reset snap daemon will not be running, + # bitd and scrub deamons will be in running state. 
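The comment above summarises what the assertions just below encode: after a plain "gluster volume reset <vol>", bitd and scrub keep running but snapd stops (and, later in this test, "reset <vol> force" stops all three). A compact sketch of the plain-reset check, built only from helpers this file already imports; check_daemons_after_plain_reset is a hypothetical name:

from glustolibs.gluster.bitrot_ops import (is_bitd_running,
                                           is_scrub_process_running)
from glustolibs.gluster.uss_ops import is_snapd_running


def check_daemons_after_plain_reset(servers, volname):
    """bitd and scrub must still be up on every node, snapd must be down."""
    for node in servers:
        assert is_bitd_running(node, volname), (
            "bitd not running on %s" % node)
        assert is_scrub_process_running(node, volname), (
            "scrub not running on %s" % node)
        assert not is_snapd_running(node, volname), (
            "snapd should not run after volume reset on %s" % node)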
g.log.info("checking snapshot, scrub and bitrot daemons\ running or not after volume reset") for mnode in self.servers: @@ -159,31 +138,30 @@ class GlusterdVolumeReset(GlusterBaseClass): self.assertTrue(ret, "Scrub Daemon\ not running on %s server:" % mnode) ret = is_snapd_running(mnode, self.volname) - self.assertFalse(ret, "Snap Daemon should not be\ - running on %s server after volume reset:" % mnode) - g.log.info("bitd and scrub daemons are running after volume reset\ - snapd is not running as expected on volume :%s" % self.volname) + self.assertFalse(ret, "Snap Daemon should not be running on %s " + "server after volume reset:" % mnode) + g.log.info("bitd and scrub daemons are running after volume reset " + "snapd is not running as expected on volume :%s", + self.volname) # enable uss on volume g.log.info("Enabling snaphot(uss)") - ret, out, _ = enable_uss(self.mnode, self.volname) - self.assertEqual(ret, 0, "Failed to enable\ - uss on volume: %s" % self.volname) - g.log.info("uss enabled successfully on volume :%s" % self.volname) + ret, _, _ = enable_uss(self.mnode, self.volname) + self.assertEqual(ret, 0, "Failed to enable uss on volume: %s" % + self.volname) + g.log.info("uss enabled successfully on volume :%s", self.volname) # command for volume reset with force g.log.info("started resetting volume with force option") cmd = "gluster volume reset " + self.volname + " force" - ret, out, _ = g.run(self.mnode, cmd) + ret, _, _ = g.run(self.mnode, cmd) self.assertEqual(ret, 0, "volume reset fail\ for : %s" % self.volname) - g.log.info("Volume resetted sucessfully with\ - force option :%s" % self.volname) + g.log.info("Volume reset sucessfully with force option :%s", + self.volname) - ''' - After volume reset bitd, snapd, scrub daemons will not be running, - all three daemons will get die - ''' + # After volume reset bitd, snapd, scrub daemons will not be running, + # all three daemons will get die g.log.info("checking snapshot, scrub and bitrot daemons\ running or not after volume reset with force") for mnode in self.servers: @@ -196,5 +174,5 @@ class GlusterdVolumeReset(GlusterBaseClass): ret = is_snapd_running(mnode, self.volname) self.assertFalse(ret, "Snap Daemon should not be\ running on %s server after volume reset force:" % mnode) - g.log.info("After volume reset bitd, scrub and snapd are not running after\ - volume reset with force on volume :%s" % self.volname) + g.log.info("After volume reset bitd, scrub and snapd are not running " + "after volume reset with force on volume :%s", self.volname) diff --git a/tests/functional/glusterd/test_volume_status.py b/tests/functional/glusterd/test_volume_status.py index a1c0d1710..acfceb23b 100644 --- a/tests/functional/glusterd/test_volume_status.py +++ b/tests/functional/glusterd/test_volume_status.py @@ -14,18 +14,18 @@ # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
-""" Description: - Test Cases in this module related to Glusterd volume status while - IOs in progress """ +Test Cases in this module related to Glusterd volume status while +IOs in progress +""" +import random +from time import sleep from glusto.core import Glusto as g from glustolibs.gluster.exceptions import ExecutionError from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on from glustolibs.misc.misc_libs import upload_scripts from glustolibs.io.utils import (validate_io_procs, wait_for_io_to_complete, list_all_files_and_dirs_mounts) -import random -from time import sleep @runs_on([['distributed', 'replicated', 'distributed-replicated', @@ -42,7 +42,7 @@ class VolumeStatusWhenIOInProgress(GlusterBaseClass): raise ExecutionError("Peer probe failed ") else: g.log.info("All server peers are already in connected state " - "%s:" % cls.servers) + "%s:", cls.servers) # Uploading file_dir script in all client direcotries g.log.info("Upload io scripts to clients %s for running IO on " @@ -53,7 +53,7 @@ class VolumeStatusWhenIOInProgress(GlusterBaseClass): "file_dir_ops.py") ret = upload_scripts(cls.clients, script_local_path) if not ret: - raise ExecutionError("Failed to upload IO scripts to clients %s", + raise ExecutionError("Failed to upload IO scripts to clients %s" % cls.clients) g.log.info("Successfully uploaded IO scripts to clients %s", cls.clients) @@ -69,7 +69,7 @@ class VolumeStatusWhenIOInProgress(GlusterBaseClass): g.log.info("Started creating volume") ret = self.setup_volume() if ret: - g.log.info("Volme created successfully : %s" % self.volname) + g.log.info("Volme created successfully : %s", self.volname) else: raise ExecutionError("Volume creation failed: %s" % self.volname) @@ -95,7 +95,7 @@ class VolumeStatusWhenIOInProgress(GlusterBaseClass): # unmounting the volume and Cleaning up the volume ret = self.unmount_volume_and_cleanup_volume(self.mounts) if ret: - g.log.info("Volume deleted successfully : %s" % self.volname) + g.log.info("Volume deleted successfully : %s", self.volname) else: raise ExecutionError("Failed Cleanup the Volume %s" % self.volname) @@ -117,7 +117,7 @@ class VolumeStatusWhenIOInProgress(GlusterBaseClass): # Mounting a volume ret = self.mount_volume(self.mounts) self.assertTrue(ret, "Volume mount failed for %s" % self.volname) - g.log.info("Volume mounted sucessfully : %s" % self.volname) + g.log.info("Volume mounted sucessfully : %s", self.volname) # After Mounting immediately writting IO's are failing some times, # thats why keeping sleep for 10 secs @@ -147,14 +147,15 @@ class VolumeStatusWhenIOInProgress(GlusterBaseClass): # performing "gluster volume status volname inode" command on # all cluster servers randomly while io is in progress, # this command should not get hang while io is in progress + # pylint: disable=unused-variable for i in range(20): - ret, out, err = g.run(random.choice(self.servers), - "gluster --timeout=12000 volume status %s " - "inode" % self.volname) + ret, _, _ = g.run(random.choice(self.servers), + "gluster --timeout=12000 volume status %s " + "inode" % self.volname) self.assertEqual(ret, 0, ("Volume status 'inode' failed on " "volume %s" % self.volname)) g.log.info("Successful in logging volume status" - "'inode' of volume %s" % self.volname) + "'inode' of volume %s", self.volname) # Validate IO g.log.info("Wait for IO to complete and validate IO ...") diff --git a/tests/functional/nfs_ganesha/__init__.py b/tests/functional/nfs_ganesha/__init__.py new file mode 100644 index 000000000..e69de29bb diff 
--git a/tests/functional/nfs_ganesha/acls/test_nfs_ganesha_acls.py b/tests/functional/nfs_ganesha/acls/test_nfs_ganesha_acls.py deleted file mode 100644 index 871ad1090..000000000 --- a/tests/functional/nfs_ganesha/acls/test_nfs_ganesha_acls.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright (C) 2016-2017 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -""" Description: - Test Cases in this module tests the nfs ganesha version 4 - ACL functionality. -""" - -from glusto.core import Glusto as g -from glustolibs.gluster.gluster_base_class import runs_on -from glustolibs.gluster.nfs_ganesha_libs import NfsGaneshaVolumeBaseClass -from glustolibs.gluster.nfs_ganesha_ops import enable_acl, disable_acl -from glustolibs.gluster.exceptions import ExecutionError -import time -import re - - -@runs_on([['replicated', 'distributed', 'distributed-replicated', - 'dispersed', 'distributed-dispersed'], - ['nfs']]) -class TestNfsGaneshaAcls(NfsGaneshaVolumeBaseClass): - """ - Tests to verify Nfs Ganesha v4 ACL stability - """ - - @classmethod - def setUpClass(cls): - NfsGaneshaVolumeBaseClass.setUpClass.im_func(cls) - - def setUp(self): - ret = enable_acl(self.servers[0], self.volname) - if not ret: - raise ExecutionError("Failed to enable ACL on the nfs " - "ganesha cluster") - - def test_nfsv4_acls(self): - - source_file = ("/usr/share/glustolibs/io/scripts/nfs_ganesha/" - "nfsv4_acl_test.sh") - test_acl_file = "/tmp/nfsv4_acl_test.sh" - - for server in self.servers: - g.upload(server, source_file, "/tmp/", user="root") - - cmd = ("export ONLY_CREATE_USERS_AND_GROUPS=\"yes\";sh %s %s" - % (test_acl_file, "/tmp")) - ret, _, _ = g.run(server, cmd) - self.assertEqual(ret, 0, ("Failed to create users and groups " - "for running acl test in server %s" - % server)) - time.sleep(5) - - for client in self.clients: - g.upload(client, source_file, "/tmp/", user="root") - option_flag = 0 - for mount in self.mounts: - if mount.client_system == client: - mountpoint = mount.mountpoint - if "vers=4" not in mount.options: - option_flag = 1 - break - - if option_flag: - g.log.info("This acl test required mount option to be " - "vers=4 in %s" % client) - continue - - dirname = mountpoint + "/" + "testdir_" + client - cmd = "[ -d %s ] || mkdir %s" % (dirname, dirname) - ret, _, _ = g.run(client, cmd) - self.assertEqual(ret, 0, ("Failed to create dir %s for running " - "acl test" % dirname)) - - cmd = "sh %s %s" % (test_acl_file, dirname) - ret, out, _ = g.run(client, cmd) - self.assertEqual(ret, 0, ("Failed to execute acl test on %s" - % client)) - - g.log.info("ACL test output in %s : %s" % (client, out)) - acl_output = out.split('\n')[:-1] - for output in acl_output: - match = re.search("^OK.*", output) - if match is None: - self.assertTrue(False, ("Unexpected behaviour in acl " - "functionality in %s" % client)) - - cmd = "rm -rf %s" % dirname - ret, _, _ = 
g.run(client, cmd) - self.assertEqual(ret, 0, ("Failed to remove dir %s after running " - "acl test" % dirname)) - - def tearDown(self): - ret = disable_acl(self.servers[0], self.volname) - if not ret: - raise ExecutionError("Failed to disable ACL on nfs " - "ganesha cluster") - - @classmethod - def tearDownClass(cls): - NfsGaneshaVolumeBaseClass.tearDownClass.im_func(cls) diff --git a/tests/functional/nfs_ganesha/exports/test_nfs_ganesha_volume_exports.py b/tests/functional/nfs_ganesha/exports/test_nfs_ganesha_volume_exports.py deleted file mode 100644 index 0594d2118..000000000 --- a/tests/functional/nfs_ganesha/exports/test_nfs_ganesha_volume_exports.py +++ /dev/null @@ -1,563 +0,0 @@ -# Copyright (C) 2016-2017 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -""" Description: - Test Cases in this module tests the nfs ganesha exports, - refresh configs, cluster enable/disable functionality. -""" - -from glusto.core import Glusto as g -from glustolibs.gluster.gluster_base_class import runs_on -from glustolibs.gluster.nfs_ganesha_libs import ( - NfsGaneshaVolumeBaseClass, - wait_for_nfs_ganesha_volume_to_get_exported, - wait_for_nfs_ganesha_volume_to_get_unexported, - NfsGaneshaIOBaseClass) -from glustolibs.gluster.nfs_ganesha_ops import (enable_acl, disable_acl, - run_refresh_config, - enable_nfs_ganesha, - disable_nfs_ganesha, - export_nfs_ganesha_volume, - unexport_nfs_ganesha_volume) -from glustolibs.gluster.volume_ops import (volume_stop, volume_start, - get_volume_info) -from glustolibs.gluster.volume_libs import (get_volume_options, setup_volume, - cleanup_volume, is_volume_exported, - log_volume_info_and_status) -import time -from glustolibs.io.utils import (validate_io_procs, - list_all_files_and_dirs_mounts, - wait_for_io_to_complete) -from glustolibs.gluster.exceptions import ExecutionError -import os -import re - - -@runs_on([['replicated', 'distributed', 'distributed-replicated', - 'dispersed', 'distributed-dispersed'], - ['nfs']]) -class TestNfsGaneshaVolumeExports(NfsGaneshaVolumeBaseClass): - """ - Tests to verify Nfs Ganesha exports, cluster enable/disable - functionality. - """ - - @classmethod - def setUpClass(cls): - NfsGaneshaVolumeBaseClass.setUpClass.im_func(cls) - - def test_nfs_ganesha_export_after_vol_restart(self): - """ - Tests script to check nfs-ganesha volume gets exported after - multiple volume restarts. - """ - - for i in range(5): - g.log.info("Testing nfs ganesha export after volume stop/start." - "Count : %s " % str(i)) - - # Stoping volume - ret = volume_stop(self.mnode, self.volname) - self.assertTrue(ret, ("Failed to stop volume %s" % self.volname)) - - # Waiting for few seconds for volume unexport. Max wait time is - # 120 seconds. 
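Around this point the removed exports test pairs every volume stop/start with a bounded wait on the ganesha export state (the helpers poll internally, up to the 120 seconds mentioned in the comment above) rather than a fixed sleep. A condensed sketch of one stop/start cycle, assuming volume_stop/volume_start return the (retcode, out, err) tuple they are unpacked into elsewhere in this patch; mnode and volname are placeholders:

# stop, then wait for the volume to drop out of the ganesha exports
ret, _, _ = volume_stop(mnode, volname)
assert ret == 0, "failed to stop %s" % volname
assert wait_for_nfs_ganesha_volume_to_get_unexported(mnode, volname), \
    "%s still exported after stop" % volname

# start again, then wait for it to be re-exported
ret, _, _ = volume_start(mnode, volname)
assert ret == 0, "failed to start %s" % volname
assert wait_for_nfs_ganesha_volume_to_get_exported(mnode, volname), \
    "%s not exported after start" % volname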
- ret = wait_for_nfs_ganesha_volume_to_get_unexported(self.mnode, - self.volname) - self.assertTrue(ret, ("Failed to unexport volume %s after " - "stopping volume" % self.volname)) - - # Starting volume - ret = volume_start(self.mnode, self.volname) - self.assertTrue(ret, ("Failed to start volume %s" % self.volname)) - - # Waiting for few seconds for volume export. Max wait time is - # 120 seconds. - ret = wait_for_nfs_ganesha_volume_to_get_exported(self.mnode, - self.volname) - self.assertTrue(ret, ("Failed to export volume %s after " - "starting volume" % self.volname)) - - def test_nfs_ganesha_enable_disable_cluster(self): - """ - Tests script to check nfs-ganehsa volume gets exported after - multiple enable/disable of cluster. - """ - - for i in range(5): - g.log.info("Executing multiple enable/disable of nfs ganesha " - "cluster. Count : %s " % str(i)) - - ret, _, _ = disable_nfs_ganesha(self.mnode) - self.assertEqual(ret, 0, ("Failed to disable nfs-ganesha cluster")) - - time.sleep(2) - vol_option = get_volume_options(self.mnode, self.volname, - option='ganesha.enable') - if vol_option is None: - self.assertEqual(ret, 0, ("Failed to get ganesha.enable volume" - " option for %s " % self.volume)) - - if vol_option['ganesha.enable'] != 'off': - self.assertTrue(False, ("Failed to unexport volume by default " - "after disabling cluster")) - - ret, _, _ = enable_nfs_ganesha(self.mnode) - self.assertEqual(ret, 0, ("Failed to enable nfs-ganesha cluster")) - - time.sleep(2) - vol_option = get_volume_options(self.mnode, self.volname, - option='ganesha.enable') - if vol_option is None: - self.assertEqual(ret, 0, ("Failed to get ganesha.enable volume" - " option for %s " % self.volume)) - - if vol_option['ganesha.enable'] != 'off': - self.assertTrue(False, ("Volume %s is exported by default " - "after disable and enable of cluster" - "which is unexpected." - % self.volname)) - - # Export volume after disable and enable of cluster - ret, _, _ = export_nfs_ganesha_volume( - mnode=self.mnode, volname=self.volname) - self.assertEqual(ret, 0, ("Failed to export volume %s " - "after disable and enable of cluster" - % self.volname)) - time.sleep(5) - - # List the volume exports - _, _, _ = g.run(self.mnode, "showmount -e") - - @classmethod - def tearDownClass(cls): - - (NfsGaneshaVolumeBaseClass. - tearDownClass. - im_func(cls, - teardown_nfs_ganesha_cluster=False)) - - -@runs_on([['replicated', 'distributed', 'distributed-replicated', - 'dispersed', 'distributed-dispersed'], - ['nfs']]) -class TestNfsGaneshaVolumeExportsWithIO(NfsGaneshaIOBaseClass): - """ - Tests to verfiy nfs ganesha features when IO is in progress. - """ - - def test_nfs_ganesha_multiple_refresh_configs(self): - """ - Tests script to check nfs-ganehsa volume gets exported and IOs - are running after running multiple refresh configs. - """ - - self.acl_check_flag = False - - for i in range(6): - # Enabling/Disabling ACLs to modify the export configuration - # before running refresh config - if i % 2 == 0: - ret = disable_acl(self.mnode, self. volname) - self.assertTrue(ret, ("Failed to disable acl on %s" - % self.volname)) - self.acl_check_flag = False - else: - ret = enable_acl(self.mnode, self. volname) - self.assertTrue(ret, ("Failed to enable acl on %s" - % self.volname)) - self.acl_check_flag = True - - ret = run_refresh_config(self.mnode, self. 
volname) - self.assertTrue(ret, ("Failed to run refresh config" - "for volume %s" % self.volname)) - - time.sleep(2) - - # Validate IO - g.log.info("Wait for IO to complete and validate IO ...") - ret = validate_io_procs(self.all_mounts_procs, self.mounts) - self.io_validation_complete = True - self.assertTrue(ret, "IO failed on some of the clients") - g.log.info("IO is successful on all mounts") - - # List all files and dirs created - g.log.info("List all files and directories:") - ret = list_all_files_and_dirs_mounts(self.mounts) - self.assertTrue(ret, "Failed to list all files and dirs") - g.log.info("Listing all files and directories is successful") - - def tearDown(self): - if self.acl_check_flag: - ret = disable_acl(self.mnode, self. volname) - if not ret: - raise ExecutionError("Failed to disable acl on %s" - % self.volname) - self.acl_check_flag = False - - NfsGaneshaIOBaseClass.tearDown.im_func(self) - - @classmethod - def tearDownClass(cls): - - (NfsGaneshaIOBaseClass. - tearDownClass. - im_func(cls, - teardown_nfsganesha_cluster=False)) - - -@runs_on([['replicated', 'distributed', 'distributed-replicated', - 'dispersed', 'distributed-dispersed'], - ['nfs']]) -class TestNfsGaneshaMultiVolumeExportsWithIO(NfsGaneshaIOBaseClass): - """ - Tests to verfiy multiple volumes gets exported when IO is in progress. - """ - - def test_nfs_ganesha_export_with_multiple_volumes(self): - """ - Testcase to verfiy multiple volumes gets exported when IO is in - progress. - """ - - for i in range(5): - self.volume['name'] = "nfsvol" + str(i) - self.volume['voltype']['type'] = 'distributed' - self.volume['voltype']['replica_count'] = 1 - self.volume['voltype']['dist_count'] = 2 - - # Create volume - ret = setup_volume(mnode=self.mnode, - all_servers_info=self.all_servers_info, - volume_config=self.volume, force=True) - if not ret: - self.assertTrue(ret, ("Setup volume %s failed" % self.volume)) - time.sleep(5) - - # Export volume with nfs ganesha, if it is not exported already - vol_option = get_volume_options(self.mnode, self.volume['name'], - option='ganesha.enable') - if vol_option is None: - self.assertTrue(False, ("Failed to get ganesha.enable volume" - " option for %s " - % self.volume['name'])) - if vol_option['ganesha.enable'] != 'on': - ret, out, err = export_nfs_ganesha_volume( - mnode=self.mnode, volname=self.volume['name']) - if ret != 0: - self.assertTrue(False, ("Failed to export volume %s " - "as NFS export" - % self.volume['name'])) - time.sleep(5) - else: - g.log.info("Volume %s is exported already" - % self.volume['name']) - - # Waiting for few seconds for volume export. Max wait time is - # 120 seconds. - ret = wait_for_nfs_ganesha_volume_to_get_exported(self.mnode, - (self. 
- volume['name'])) - self.assertTrue(ret, ("Failed to export volume %s after " - "starting volume when IO is running on " - "another volume" % self.volume['name'])) - - # Log Volume Info and Status - ret = log_volume_info_and_status(self.mnode, self.volume['name']) - if not ret: - self.assertTrue(False, ("Logging volume %s info and status ", - "failed " % self.volume['name'])) - - # Validate IO - g.log.info("Wait for IO to complete and validate IO ...") - ret = validate_io_procs(self.all_mounts_procs, self.mounts) - self.io_validation_complete = True - self.assertTrue(ret, "IO failed on some of the clients") - g.log.info("IO is successful on all mounts") - - # List all files and dirs created - g.log.info("List all files and directories:") - ret = list_all_files_and_dirs_mounts(self.mounts) - self.assertTrue(ret, "Failed to list all files and dirs") - g.log.info("Listing all files and directories is successful") - - def tearDown(self): - - # Clean up the volumes created specific for this tests. - for i in range(5): - volname = "nfsvol" + str(i) - volinfo = get_volume_info(self.mnode, volname) - if volinfo is None or volname not in volinfo: - g.log.info("Volume %s does not exist in %s" - % (volname, self.mnode)) - continue - - # Unexport volume, if it is not unexported already - vol_option = get_volume_options(self.mnode, volname, - option='ganesha.enable') - if vol_option is None: - raise ExecutionError("Failed to get ganesha.enable volume " - " option for %s " % volname) - if vol_option['ganesha.enable'] != 'off': - if is_volume_exported(self.mnode, volname, "nfs"): - ret, out, err = unexport_nfs_ganesha_volume( - mnode=self.mnode, volname=volname) - if ret != 0: - raise ExecutionError("Failed to unexport volume %s " - % volname) - time.sleep(5) - else: - g.log.info("Volume %s is unexported already" - % volname) - - _, _, _ = g.run(self.mnode, "showmount -e") - - ret = cleanup_volume(mnode=self.mnode, volname=volname) - if not ret: - raise ExecutionError("cleanup volume %s failed" % volname) - - NfsGaneshaIOBaseClass.tearDown.im_func(self) - - @classmethod - def tearDownClass(cls): - - (NfsGaneshaIOBaseClass. - tearDownClass. - im_func(cls, - teardown_nfsganesha_cluster=False)) - - -@runs_on([['replicated', 'distributed', 'distributed-replicated', - 'dispersed', 'distributed-dispersed'], - ['nfs']]) -class TestNfsGaneshaSubDirExportsWithIO(NfsGaneshaIOBaseClass): - """ - Tests to verfiy nfs ganesha sub directory exports. - """ - - def start_and_wait_for_io_to_complete(self): - """This module starts IO from clients and waits for io to complate. - Returns True, if io gets completed successfully. Otherwise, False - """ - - # Start IO on mounts - g.log.info("Starting IO on all mounts...") - self.all_mounts_procs = [] - for mount_obj in self.mounts: - cmd = ("python %s create_deep_dirs_with_files " - "--dirname-start-num %d " - "--dir-depth 2 " - "--dir-length 15 " - "--max-num-of-dirs 5 " - "--num-of-files 10 %s" % (self.script_upload_path, - self.counter, - mount_obj.mountpoint)) - proc = g.run_async(mount_obj.client_system, cmd, - user=mount_obj.user) - self.all_mounts_procs.append(proc) - self.counter = self.counter + 10 - self.io_validation_complete = False - - # Adding a delay of 15 seconds before test method starts. 
This - # is to ensure IO's are in progress and giving some time to fill data - time.sleep(15) - - # Validate IO - g.log.info("Wait for IO to complete and validate IO ...") - ret = validate_io_procs(self.all_mounts_procs, self.mounts) - self.io_validation_complete = True - if not ret: - g.log.error("IO failed on some of the clients") - return False - g.log.info("IO is successful on all mounts") - - # List all files and dirs created - g.log.info("List all files and directories:") - ret = list_all_files_and_dirs_mounts(self.mounts) - if not ret: - g.log.error("Failed to list all files and dirs") - return False - g.log.info("Listing all files and directories is successful") - return True - - def setUp(self): - """setUp writes data from all mounts and selects subdirectory - required for the test and unmount the existing mounts. - """ - - NfsGaneshaIOBaseClass.setUp.im_func(self) - - # Validate IO - g.log.info("Wait for IO to complete and validate IO ...") - ret = validate_io_procs(self.all_mounts_procs, self.mounts) - self.io_validation_complete = True - if not ret: - raise ExecutionError("IO failed on some of the clients") - g.log.info("IO is successful on all mounts") - - # List all files and dirs created - g.log.info("List all files and directories:") - ret = list_all_files_and_dirs_mounts(self.mounts) - if not ret: - raise ExecutionError("Failed to list all files and dirs") - g.log.info("Listing all files and directories is successful") - - mountpoint = self.mounts[0].mountpoint - client = self.mounts[0].client_system - - # Select the subdirectory required for the test. - cmd = "find %s -type d -links 2 | grep -ve '.trashcan'" % mountpoint - ret, out, err = g.run(client, cmd) - if ret != 0: - raise ExecutionError("Failed to list the deep level directories") - self.subdir_path = out.split("\n")[0] - - _rc = True - for mount_obj in self.mounts: - ret = mount_obj.unmount() - if not ret: - g.log.error("Unable to unmount volume '%s:%s' on '%s:%s'", - mount_obj.server_system, mount_obj.volname, - mount_obj.client_system, mount_obj.mountpoint) - _rc = False - if not _rc: - raise ExecutionError("Unmount of all mounts are not " - "successful") - - def test_nfs_ganesha_subdirectory_mount_from_client_side(self): - """ - Tests script to verify nfs ganesha subdirectory mount from client side - succeeds and able to write IOs. - """ - - for mount_obj in self.mounts: - subdir_to_mount = self.subdir_path.replace(mount_obj.mountpoint, - '') - if not subdir_to_mount.startswith(os.path.sep): - subdir_to_mount = os.path.sep + subdir_to_mount - - mount_obj.volname = mount_obj.volname + subdir_to_mount - if not mount_obj.is_mounted(): - ret = mount_obj.mount() - self.assertTrue(ret, ("Unable to mount volume '%s:%s' " - "on '%s:%s'" - % (mount_obj.server_system, - mount_obj.volname, - mount_obj.client_system, - mount_obj.mountpoint))) - - ret = self.start_and_wait_for_io_to_complete() - self.assertTrue(ret, ("Failed to write IOs when sub directory is" - " mounted from client side")) - - def test_nfs_ganesha_subdirectory_mount_from_server_side(self): - """ - Tests script to verify nfs ganesha subdirectory mount from server - side succeeds and able to write IOs. 
- """ - subdir_to_mount = self.subdir_path.replace(self.mounts[0].mountpoint, - '') - if not subdir_to_mount.startswith(os.path.sep): - subdir_to_mount = os.path.sep + subdir_to_mount - - for mount_obj in self.mounts: - mount_obj.volname = mount_obj.volname + subdir_to_mount - - export_file = ("/var/run/gluster/shared_storage/nfs-ganesha/exports/" - "export.%s.conf" % self.volname) - cmd = ("sed -i s/'Path = .*'/'Path = \"\/%s\";'/g %s" - % (re.escape(self.mounts[0].volname), export_file)) - ret, _, _ = g.run(self.mnode, cmd) - self.assertEqual(ret, 0, ("Unable to change Path info to %s in %s" - % ("/" + self.mounts[0].volname, - export_file))) - - cmd = ("sed -i 's/volume=.*/& \\n volpath=\"%s\";/g' %s" - % (re.escape(subdir_to_mount), export_file)) - ret, _, _ = g.run(self.mnode, cmd) - self.assertEqual(ret, 0, ("Unable to add volpath info to %s in %s" - % ("/" + self.mounts[0].volname, - export_file))) - - cmd = ("sed -i s/'Pseudo=.*'/'Pseudo=\"\/%s\";'/g %s" - % (re.escape(self.mounts[0].volname), export_file)) - ret, _, _ = g.run(self.mnode, cmd) - self.assertEqual(ret, 0, ("Unable to change pseudo Path info to " - "%s in %s" % ("/" + self.mounts[0].volname, - export_file))) - - # Stop and start volume to take the modified export file to effect. - # Stoping volume - ret = volume_stop(self.mnode, self.volname) - self.assertTrue(ret, ("Failed to stop volume %s" % self.volname)) - - # Waiting for few seconds for volume unexport. Max wait time is - # 120 seconds. - ret = wait_for_nfs_ganesha_volume_to_get_unexported(self.mnode, - self.volname) - self.assertTrue(ret, ("Failed to unexport volume %s after " - "stopping volume" % self.volname)) - - # Starting volume - ret = volume_start(self.mnode, self.volname) - self.assertTrue(ret, ("Failed to start volume %s" % self.volname)) - - # Waiting for few seconds for volume export. Max wait time is - # 120 seconds. - ret = wait_for_nfs_ganesha_volume_to_get_exported(self.mnode, - (self.mounts[0]. - volname)) - self.assertTrue(ret, ("Failed to export sub directory %s after " - "starting volume" % self.mounts[0].volname)) - - for mount_obj in self.mounts: - if not mount_obj.is_mounted(): - ret = mount_obj.mount() - self.assertTrue(ret, ("Unable to mount volume '%s:%s' " - "on '%s:%s'" - % (mount_obj.server_system, - mount_obj.volname, - mount_obj.client_system, - mount_obj.mountpoint))) - - ret = self.start_and_wait_for_io_to_complete() - self.assertTrue(ret, ("Failed to write IOs when sub directory is" - " mounted from server side")) - - def tearDown(self): - """setUp starts the io from all the mounts. - IO creates deep dirs and files. - """ - - # Wait for IO to complete if io validation is not executed in the - # test method - if not self.io_validation_complete: - g.log.info("Wait for IO to complete as IO validation did not " - "succeed in test method") - ret = wait_for_io_to_complete(self.all_mounts_procs, self.mounts) - if not ret: - raise ExecutionError("IO failed on some of the clients") - g.log.info("IO is successful on all mounts") - - NfsGaneshaIOBaseClass.tearDown.im_func(self) - - @classmethod - def tearDownClass(cls): - - (NfsGaneshaIOBaseClass. - tearDownClass. 
- im_func(cls, - teardown_nfsganesha_cluster=False)) diff --git a/tests/functional/nfs_ganesha/sanity/test_nfs_ganesha_sanity.py b/tests/functional/nfs_ganesha/sanity/test_nfs_ganesha_sanity.py deleted file mode 100644 index a00c22a5a..000000000 --- a/tests/functional/nfs_ganesha/sanity/test_nfs_ganesha_sanity.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright (C) 2017-2018 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -""" Description: - Test Cases in this module test NFS-Ganesha Sanity. -""" -from glusto.core import Glusto as g -from glustolibs.gluster.gluster_base_class import runs_on -from glustolibs.gluster.nfs_ganesha_libs import ( - NfsGaneshaVolumeBaseClass, - NfsGaneshaIOBaseClass) - - -@runs_on([['replicated', 'distributed', 'distributed-replicated', - 'dispersed', 'distributed-dispersed'], - ['nfs']]) -class TestNfsGaneshaSanity(NfsGaneshaVolumeBaseClass): - """ - Tests to verify NFS Ganesha Sanity. - """ - - @classmethod - def setUpClass(cls): - NfsGaneshaVolumeBaseClass.setUpClass.im_func(cls) - - def test_nfs_ganesha_HA_Basic_IO(self): - """ - Tests to create an HA cluster and run basic IO - """ - - # Starting IO on the mounts.Let's do iozone first. - for mount_obj in self.mounts: - # Make sure you run relevant setup playbooks,view README ! - g.log.info("Running iozone on %s", mount_obj.client_system) - cmd = ("cd %s ;iozone -a" % (mount_obj.mountpoint)) - ret, out, err = g.run(mount_obj.client_system, cmd) - if ret == 0: - g.log.info(" Iozone run successful") - else: - g.log.error("ERROR! Drastic Iozone error encountered !") - self.assertEqual(ret, 0, "Iozone run failed!") - - # Check for crashes after iozone run - g.log.info("Checking for Cluster Status after iozone run") - ret, out, err = g.run(self.servers[0], - "/usr/libexec/ganesha/ganesha-ha.sh --status" - " /var/run/gluster/shared_storage/nfs-ganesha") - - if "HEALTHY" in out: - g.log.info("Cluster is HEALTHY,Continuing..") - - else: - g.log.error("ERROR! Cluster unhealthy,check for cores!") - self.assertEqual(ret, 0, "Iozone run failed! Cluster Unhealthy") - - # Running kernel untar now,single loop for the sanity test - g.log.info("Running kernel untars now") - for mount_obj in self.mounts: - cmd = ("cd %s ;mkdir $(hostname);cd $(hostname);" - "wget https://www.kernel.org/pub/linux/kernel/v2.6" - "/linux-2.6.1.tar.gz;" - "tar xvf linux-2.6.1.tar.gz" % (mount_obj.mountpoint)) - ret, out, err = g.run(mount_obj.client_system, cmd) - if ret == 0: - g.log.info("Succesfully untared the tarball!") - else: - g.log.error("ERROR ! 
Kernel untar errored out!") - self.assertEqual(ret, 0, "Kernel untar failed!") - - # Check for crashes after kernel untar - g.log.info("Checking for Cluster Status after kernel untar") - ret, out, err = g.run(self.servers[0], - "/usr/libexec/ganesha/ganesha-ha.sh --status" - " /var/run/gluster/shared_storage/nfs-ganesha") - - if "HEALTHY" in out: - g.log.info("Cluster is HEALTHY,Continuing..") - - else: - g.log.error("ERROR! Cluster unhealthy after I/O,check for cores!") - self.assertEqual(ret, 0, "Cluster unhealthy after Kernel untar") - - @classmethod - def tearDownClass(cls): - (NfsGaneshaIOBaseClass. - tearDownClass. - im_func(cls, - teardown_nfsganesha_cluster=False)) diff --git a/tests/functional/nfs_ganesha/test_nfs_ganesha_acls.py b/tests/functional/nfs_ganesha/test_nfs_ganesha_acls.py new file mode 100644 index 000000000..12a825c2e --- /dev/null +++ b/tests/functional/nfs_ganesha/test_nfs_ganesha_acls.py @@ -0,0 +1,114 @@ +# Copyright (C) 2016-2017 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +""" Description: + Test Cases in this module tests the nfs ganesha version 4 + ACL functionality. 
+""" + +from glusto.core import Glusto as g +from glustolibs.gluster.gluster_base_class import runs_on +from glustolibs.gluster.nfs_ganesha_libs import NfsGaneshaVolumeBaseClass +from glustolibs.gluster.nfs_ganesha_ops import enable_acl, disable_acl +from glustolibs.gluster.exceptions import ExecutionError +import time +import re + + +@runs_on([['replicated', 'distributed', 'distributed-replicated', + 'dispersed', 'distributed-dispersed'], + ['nfs']]) +class TestNfsGaneshaAcls(NfsGaneshaVolumeBaseClass): + """ + Tests to verify Nfs Ganesha v4 ACL stability + """ + + @classmethod + def setUpClass(cls): + NfsGaneshaVolumeBaseClass.setUpClass.im_func(cls) + + def setUp(self): + ret = enable_acl(self.servers[0], self.volname) + if not ret: + raise ExecutionError("Failed to enable ACL on the nfs " + "ganesha cluster") + + def test_nfsv4_acls(self): + # pylint: disable=too-many-locals + + source_file = ("/usr/share/glustolibs/io/scripts/nfs_ganesha/" + "nfsv4_acl_test.sh") + test_acl_file = "/tmp/nfsv4_acl_test.sh" + + for server in self.servers: + g.upload(server, source_file, "/tmp/", user="root") + + cmd = ("export ONLY_CREATE_USERS_AND_GROUPS=\"yes\";sh %s %s" + % (test_acl_file, "/tmp")) + ret, _, _ = g.run(server, cmd) + self.assertEqual(ret, 0, ("Failed to create users and groups " + "for running acl test in server %s" + % server)) + time.sleep(5) + + for client in self.clients: + g.upload(client, source_file, "/tmp/", user="root") + option_flag = 0 + for mount in self.mounts: + if mount.client_system == client: + mountpoint = mount.mountpoint + if "vers=4" not in mount.options: + option_flag = 1 + break + + if option_flag: + g.log.info("This acl test required mount option to be " + "vers=4 in %s", client) + continue + + dirname = mountpoint + "/" + "testdir_" + client + cmd = "[ -d %s ] || mkdir %s" % (dirname, dirname) + ret, _, _ = g.run(client, cmd) + self.assertEqual(ret, 0, "Failed to create dir %s for running " + "acl test" % dirname) + + cmd = "sh %s %s" % (test_acl_file, dirname) + ret, out, _ = g.run(client, cmd) + self.assertEqual(ret, 0, ("Failed to execute acl test on %s" + % client)) + + g.log.info("ACL test output in %s : %s", client, out) + acl_output = out.split('\n')[:-1] + for output in acl_output: + match = re.search("^OK.*", output) + if match is None: + self.assertTrue(False, "Unexpected behaviour in acl " + "functionality in %s" % client) + + cmd = "rm -rf %s" % dirname + ret, _, _ = g.run(client, cmd) + self.assertEqual(ret, 0, "Failed to remove dir %s after running " + "acl test" % dirname) + + def tearDown(self): + ret = disable_acl(self.servers[0], self.volname) + if not ret: + raise ExecutionError("Failed to disable ACL on nfs " + "ganesha cluster") + + @classmethod + def tearDownClass(cls): + NfsGaneshaVolumeBaseClass.tearDownClass.im_func(cls) diff --git a/tests/functional/nfs_ganesha/test_nfs_ganesha_run_io_multiple_clients.py b/tests/functional/nfs_ganesha/test_nfs_ganesha_run_io_multiple_clients.py index 4a414c09f..b6a1a4391 100644 --- a/tests/functional/nfs_ganesha/test_nfs_ganesha_run_io_multiple_clients.py +++ b/tests/functional/nfs_ganesha/test_nfs_ganesha_run_io_multiple_clients.py @@ -84,10 +84,3 @@ class TestNfsGaneshaWithDifferentIOPatterns(NfsGaneshaVolumeBaseClass): # pcs status output _, _, _ = g.run(self.servers[0], "pcs status") - - @classmethod - def tearDownClass(cls): - (NfsGaneshaVolumeBaseClass. - tearDownClass. 
- im_func(cls, - teardown_nfs_ganesha_cluster=False)) diff --git a/tests/functional/nfs_ganesha/test_nfs_ganesha_sanity.py b/tests/functional/nfs_ganesha/test_nfs_ganesha_sanity.py new file mode 100644 index 000000000..a00c22a5a --- /dev/null +++ b/tests/functional/nfs_ganesha/test_nfs_ganesha_sanity.py @@ -0,0 +1,101 @@ +# Copyright (C) 2017-2018 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +""" Description: + Test Cases in this module test NFS-Ganesha Sanity. +""" +from glusto.core import Glusto as g +from glustolibs.gluster.gluster_base_class import runs_on +from glustolibs.gluster.nfs_ganesha_libs import ( + NfsGaneshaVolumeBaseClass, + NfsGaneshaIOBaseClass) + + +@runs_on([['replicated', 'distributed', 'distributed-replicated', + 'dispersed', 'distributed-dispersed'], + ['nfs']]) +class TestNfsGaneshaSanity(NfsGaneshaVolumeBaseClass): + """ + Tests to verify NFS Ganesha Sanity. + """ + + @classmethod + def setUpClass(cls): + NfsGaneshaVolumeBaseClass.setUpClass.im_func(cls) + + def test_nfs_ganesha_HA_Basic_IO(self): + """ + Tests to create an HA cluster and run basic IO + """ + + # Starting IO on the mounts.Let's do iozone first. + for mount_obj in self.mounts: + # Make sure you run relevant setup playbooks,view README ! + g.log.info("Running iozone on %s", mount_obj.client_system) + cmd = ("cd %s ;iozone -a" % (mount_obj.mountpoint)) + ret, out, err = g.run(mount_obj.client_system, cmd) + if ret == 0: + g.log.info(" Iozone run successful") + else: + g.log.error("ERROR! Drastic Iozone error encountered !") + self.assertEqual(ret, 0, "Iozone run failed!") + + # Check for crashes after iozone run + g.log.info("Checking for Cluster Status after iozone run") + ret, out, err = g.run(self.servers[0], + "/usr/libexec/ganesha/ganesha-ha.sh --status" + " /var/run/gluster/shared_storage/nfs-ganesha") + + if "HEALTHY" in out: + g.log.info("Cluster is HEALTHY,Continuing..") + + else: + g.log.error("ERROR! Cluster unhealthy,check for cores!") + self.assertEqual(ret, 0, "Iozone run failed! Cluster Unhealthy") + + # Running kernel untar now,single loop for the sanity test + g.log.info("Running kernel untars now") + for mount_obj in self.mounts: + cmd = ("cd %s ;mkdir $(hostname);cd $(hostname);" + "wget https://www.kernel.org/pub/linux/kernel/v2.6" + "/linux-2.6.1.tar.gz;" + "tar xvf linux-2.6.1.tar.gz" % (mount_obj.mountpoint)) + ret, out, err = g.run(mount_obj.client_system, cmd) + if ret == 0: + g.log.info("Succesfully untared the tarball!") + else: + g.log.error("ERROR ! 
Kernel untar errored out!") + self.assertEqual(ret, 0, "Kernel untar failed!") + + # Check for crashes after kernel untar + g.log.info("Checking for Cluster Status after kernel untar") + ret, out, err = g.run(self.servers[0], + "/usr/libexec/ganesha/ganesha-ha.sh --status" + " /var/run/gluster/shared_storage/nfs-ganesha") + + if "HEALTHY" in out: + g.log.info("Cluster is HEALTHY,Continuing..") + + else: + g.log.error("ERROR! Cluster unhealthy after I/O,check for cores!") + self.assertEqual(ret, 0, "Cluster unhealthy after Kernel untar") + + @classmethod + def tearDownClass(cls): + (NfsGaneshaIOBaseClass. + tearDownClass. + im_func(cls, + teardown_nfsganesha_cluster=False)) diff --git a/tests/functional/nfs_ganesha/test_nfs_ganesha_volume_exports.py b/tests/functional/nfs_ganesha/test_nfs_ganesha_volume_exports.py new file mode 100644 index 000000000..06cd221ba --- /dev/null +++ b/tests/functional/nfs_ganesha/test_nfs_ganesha_volume_exports.py @@ -0,0 +1,556 @@ +# Copyright (C) 2016-2017 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +""" Description: + Test Cases in this module tests the nfs ganesha exports, + refresh configs, cluster enable/disable functionality. +""" + +import time +import os +import re +from glusto.core import Glusto as g +from glustolibs.gluster.gluster_base_class import runs_on +from glustolibs.gluster.nfs_ganesha_libs import ( + NfsGaneshaVolumeBaseClass, + wait_for_nfs_ganesha_volume_to_get_exported, + wait_for_nfs_ganesha_volume_to_get_unexported, + NfsGaneshaIOBaseClass) +from glustolibs.gluster.nfs_ganesha_ops import (enable_acl, disable_acl, + run_refresh_config, + enable_nfs_ganesha, + disable_nfs_ganesha, + export_nfs_ganesha_volume, + unexport_nfs_ganesha_volume) +from glustolibs.gluster.volume_ops import (volume_stop, volume_start, + get_volume_info) +from glustolibs.gluster.volume_libs import (get_volume_options, setup_volume, + cleanup_volume, is_volume_exported, + log_volume_info_and_status) +from glustolibs.io.utils import (validate_io_procs, + list_all_files_and_dirs_mounts, + wait_for_io_to_complete) +from glustolibs.gluster.exceptions import ExecutionError + + +@runs_on([['replicated', 'distributed', 'distributed-replicated', + 'dispersed', 'distributed-dispersed'], + ['nfs']]) +class TestNfsGaneshaVolumeExports(NfsGaneshaVolumeBaseClass): + """ + Tests to verify Nfs Ganesha exports, cluster enable/disable + functionality. + """ + + @classmethod + def setUpClass(cls): + NfsGaneshaVolumeBaseClass.setUpClass.im_func(cls) + + def test_nfs_ganesha_export_after_vol_restart(self): + """ + Tests script to check nfs-ganesha volume gets exported after + multiple volume restarts. + """ + + for i in range(5): + g.log.info("Testing nfs ganesha export after volume stop/start." 
+ "Count : %s", str(i)) + + # Stoping volume + ret = volume_stop(self.mnode, self.volname) + self.assertTrue(ret, ("Failed to stop volume %s" % self.volname)) + + # Waiting for few seconds for volume unexport. Max wait time is + # 120 seconds. + ret = wait_for_nfs_ganesha_volume_to_get_unexported(self.mnode, + self.volname) + self.assertTrue(ret, ("Failed to unexport volume %s after " + "stopping volume" % self.volname)) + + # Starting volume + ret = volume_start(self.mnode, self.volname) + self.assertTrue(ret, ("Failed to start volume %s" % self.volname)) + + # Waiting for few seconds for volume export. Max wait time is + # 120 seconds. + ret = wait_for_nfs_ganesha_volume_to_get_exported(self.mnode, + self.volname) + self.assertTrue(ret, ("Failed to export volume %s after " + "starting volume" % self.volname)) + + def test_nfs_ganesha_enable_disable_cluster(self): + """ + Tests script to check nfs-ganehsa volume gets exported after + multiple enable/disable of cluster. + """ + + for i in range(5): + g.log.info("Executing multiple enable/disable of nfs ganesha " + "cluster. Count : %s ", str(i)) + + ret, _, _ = disable_nfs_ganesha(self.mnode) + self.assertEqual(ret, 0, ("Failed to disable nfs-ganesha cluster")) + + time.sleep(2) + vol_option = get_volume_options(self.mnode, self.volname, + option='ganesha.enable') + if vol_option is None: + self.assertEqual(ret, 0, ("Failed to get ganesha.enable volume" + " option for %s " % self.volume)) + + self.assertEqual(vol_option.get('ganesha.enable'), 'off', "Failed " + "to unexport volume by default after disabling " + "cluster") + + ret, _, _ = enable_nfs_ganesha(self.mnode) + self.assertEqual(ret, 0, ("Failed to enable nfs-ganesha cluster")) + + time.sleep(2) + vol_option = get_volume_options(self.mnode, self.volname, + option='ganesha.enable') + if vol_option is None: + self.assertEqual(ret, 0, ("Failed to get ganesha.enable volume" + " option for %s " % self.volume)) + + self.assertEqual(vol_option.get('ganesha.enable'), 'off', "Volume " + "%s is exported by default after disable and " + "enable of cluster which is unexpected." % + self.volname) + + # Export volume after disable and enable of cluster + ret, _, _ = export_nfs_ganesha_volume( + mnode=self.mnode, volname=self.volname) + self.assertEqual(ret, 0, ("Failed to export volume %s " + "after disable and enable of cluster" + % self.volname)) + time.sleep(5) + + # List the volume exports + _, _, _ = g.run(self.mnode, "showmount -e") + + @classmethod + def tearDownClass(cls): + + (NfsGaneshaVolumeBaseClass. + tearDownClass. + im_func(cls, + teardown_nfs_ganesha_cluster=False)) + + +@runs_on([['replicated', 'distributed', 'distributed-replicated', + 'dispersed', 'distributed-dispersed'], + ['nfs']]) +class TestNfsGaneshaVolumeExportsWithIO(NfsGaneshaIOBaseClass): + """ + Tests to verfiy nfs ganesha features when IO is in progress. + """ + + def test_nfs_ganesha_multiple_refresh_configs(self): + """ + Tests script to check nfs-ganehsa volume gets exported and IOs + are running after running multiple refresh configs. + """ + + self.acl_check_flag = False + + for i in range(6): + # Enabling/Disabling ACLs to modify the export configuration + # before running refresh config + if i % 2 == 0: + ret = disable_acl(self.mnode, self. volname) + self.assertTrue(ret, ("Failed to disable acl on %s" + % self.volname)) + self.acl_check_flag = False + else: + ret = enable_acl(self.mnode, self. 
volname) + self.assertTrue(ret, ("Failed to enable acl on %s" + % self.volname)) + self.acl_check_flag = True + + ret = run_refresh_config(self.mnode, self. volname) + self.assertTrue(ret, ("Failed to run refresh config" + "for volume %s" % self.volname)) + + time.sleep(2) + + # Validate IO + g.log.info("Wait for IO to complete and validate IO ...") + ret = validate_io_procs(self.all_mounts_procs, self.mounts) + self.io_validation_complete = True + self.assertTrue(ret, "IO failed on some of the clients") + g.log.info("IO is successful on all mounts") + + # List all files and dirs created + g.log.info("List all files and directories:") + ret = list_all_files_and_dirs_mounts(self.mounts) + self.assertTrue(ret, "Failed to list all files and dirs") + g.log.info("Listing all files and directories is successful") + + def tearDown(self): + if self.acl_check_flag: + ret = disable_acl(self.mnode, self. volname) + if not ret: + raise ExecutionError("Failed to disable acl on %s" + % self.volname) + self.acl_check_flag = False + + NfsGaneshaIOBaseClass.tearDown.im_func(self) + + @classmethod + def tearDownClass(cls): + + (NfsGaneshaIOBaseClass. + tearDownClass. + im_func(cls, + teardown_nfsganesha_cluster=False)) + + +@runs_on([['replicated', 'distributed', 'distributed-replicated', + 'dispersed', 'distributed-dispersed'], + ['nfs']]) +class TestNfsGaneshaMultiVolumeExportsWithIO(NfsGaneshaIOBaseClass): + """ + Tests to verfiy multiple volumes gets exported when IO is in progress. + """ + + def test_nfs_ganesha_export_with_multiple_volumes(self): + """ + Testcase to verfiy multiple volumes gets exported when IO is in + progress. + """ + + for i in range(5): + self.volume['name'] = "nfsvol" + str(i) + self.volume['voltype']['type'] = 'distributed' + self.volume['voltype']['replica_count'] = 1 + self.volume['voltype']['dist_count'] = 2 + + # Create volume + ret = setup_volume(mnode=self.mnode, + all_servers_info=self.all_servers_info, + volume_config=self.volume, force=True) + if not ret: + self.assertTrue(ret, ("Setup volume %s failed" % self.volume)) + time.sleep(5) + + # Export volume with nfs ganesha, if it is not exported already + vol_option = get_volume_options(self.mnode, self.volume['name'], + option='ganesha.enable') + self.assertIsNotNone(vol_option, "Failed to get ganesha.enable " + "volume option for %s" % self.volume['name']) + if vol_option['ganesha.enable'] != 'on': + ret, _, _ = export_nfs_ganesha_volume( + mnode=self.mnode, volname=self.volume['name']) + self.assertEqual(ret, 0, "Failed to export volume %s as NFS " + "export" % self.volume['name']) + time.sleep(5) + else: + g.log.info("Volume %s is exported already", + self.volume['name']) + + # Waiting for few seconds for volume export. Max wait time is + # 120 seconds. + ret = wait_for_nfs_ganesha_volume_to_get_exported(self.mnode, + (self. 
+ volume['name'])) + self.assertTrue(ret, ("Failed to export volume %s after " + "starting volume when IO is running on " + "another volume" % self.volume['name'])) + + # Log Volume Info and Status + ret = log_volume_info_and_status(self.mnode, self.volume['name']) + self.assertTrue(ret, "Logging volume %s info and status failed" + % self.volume['name']) + + # Validate IO + g.log.info("Wait for IO to complete and validate IO ...") + ret = validate_io_procs(self.all_mounts_procs, self.mounts) + self.io_validation_complete = True + self.assertTrue(ret, "IO failed on some of the clients") + g.log.info("IO is successful on all mounts") + + # List all files and dirs created + g.log.info("List all files and directories:") + ret = list_all_files_and_dirs_mounts(self.mounts) + self.assertTrue(ret, "Failed to list all files and dirs") + g.log.info("Listing all files and directories is successful") + + def tearDown(self): + + # Clean up the volumes created specific for this tests. + for i in range(5): + volname = "nfsvol" + str(i) + volinfo = get_volume_info(self.mnode, volname) + if volinfo is None or volname not in volinfo: + g.log.info("Volume %s does not exist in %s", + volname, self.mnode) + continue + + # Unexport volume, if it is not unexported already + vol_option = get_volume_options(self.mnode, volname, + option='ganesha.enable') + if vol_option is None: + raise ExecutionError("Failed to get ganesha.enable volume " + " option for %s " % volname) + if vol_option['ganesha.enable'] != 'off': + if is_volume_exported(self.mnode, volname, "nfs"): + ret, _, _ = unexport_nfs_ganesha_volume( + mnode=self.mnode, volname=volname) + if ret != 0: + raise ExecutionError("Failed to unexport volume %s " + % volname) + time.sleep(5) + else: + g.log.info("Volume %s is unexported already", volname) + + _, _, _ = g.run(self.mnode, "showmount -e") + + ret = cleanup_volume(mnode=self.mnode, volname=volname) + if not ret: + raise ExecutionError("cleanup volume %s failed" % volname) + + NfsGaneshaIOBaseClass.tearDown.im_func(self) + + @classmethod + def tearDownClass(cls): + + (NfsGaneshaIOBaseClass. + tearDownClass. + im_func(cls, + teardown_nfsganesha_cluster=False)) + + +@runs_on([['replicated', 'distributed', 'distributed-replicated', + 'dispersed', 'distributed-dispersed'], + ['nfs']]) +class TestNfsGaneshaSubDirExportsWithIO(NfsGaneshaIOBaseClass): + """ + Tests to verfiy nfs ganesha sub directory exports. + """ + + def start_and_wait_for_io_to_complete(self): + """This module starts IO from clients and waits for io to complate. + Returns True, if io gets completed successfully. Otherwise, False + """ + + # Start IO on mounts + g.log.info("Starting IO on all mounts...") + self.all_mounts_procs = [] + for mount_obj in self.mounts: + cmd = ("python %s create_deep_dirs_with_files " + "--dirname-start-num %d " + "--dir-depth 2 " + "--dir-length 15 " + "--max-num-of-dirs 5 " + "--num-of-files 10 %s" % (self.script_upload_path, + self.counter, + mount_obj.mountpoint)) + proc = g.run_async(mount_obj.client_system, cmd, + user=mount_obj.user) + self.all_mounts_procs.append(proc) + self.counter = self.counter + 10 + self.io_validation_complete = False + + # Adding a delay of 15 seconds before test method starts. 
This + # is to ensure IO's are in progress and giving some time to fill data + time.sleep(15) + + # Validate IO + g.log.info("Wait for IO to complete and validate IO ...") + ret = validate_io_procs(self.all_mounts_procs, self.mounts) + self.io_validation_complete = True + if not ret: + g.log.error("IO failed on some of the clients") + return False + g.log.info("IO is successful on all mounts") + + # List all files and dirs created + g.log.info("List all files and directories:") + ret = list_all_files_and_dirs_mounts(self.mounts) + if not ret: + g.log.error("Failed to list all files and dirs") + return False + g.log.info("Listing all files and directories is successful") + return True + + def setUp(self): + """setUp writes data from all mounts and selects subdirectory + required for the test and unmount the existing mounts. + """ + + NfsGaneshaIOBaseClass.setUp.im_func(self) + + # Validate IO + g.log.info("Wait for IO to complete and validate IO ...") + ret = validate_io_procs(self.all_mounts_procs, self.mounts) + self.io_validation_complete = True + if not ret: + raise ExecutionError("IO failed on some of the clients") + g.log.info("IO is successful on all mounts") + + # List all files and dirs created + g.log.info("List all files and directories:") + ret = list_all_files_and_dirs_mounts(self.mounts) + if not ret: + raise ExecutionError("Failed to list all files and dirs") + g.log.info("Listing all files and directories is successful") + + mountpoint = self.mounts[0].mountpoint + client = self.mounts[0].client_system + + # Select the subdirectory required for the test. + cmd = "find %s -type d -links 2 | grep -ve '.trashcan'" % mountpoint + ret, out, _ = g.run(client, cmd) + if ret != 0: + raise ExecutionError("Failed to list the deep level directories") + self.subdir_path = out.split("\n")[0] + + _rc = True + for mount_obj in self.mounts: + ret = mount_obj.unmount() + if not ret: + g.log.error("Unable to unmount volume '%s:%s' on '%s:%s'", + mount_obj.server_system, mount_obj.volname, + mount_obj.client_system, mount_obj.mountpoint) + _rc = False + if not _rc: + raise ExecutionError("Unmount of all mounts are not " + "successful") + + def test_nfs_ganesha_subdirectory_mount_from_client_side(self): + """ + Tests script to verify nfs ganesha subdirectory mount from client side + succeeds and able to write IOs. + """ + + for mount_obj in self.mounts: + subdir_to_mount = self.subdir_path.replace(mount_obj.mountpoint, + '') + if not subdir_to_mount.startswith(os.path.sep): + subdir_to_mount = os.path.sep + subdir_to_mount + + mount_obj.volname = mount_obj.volname + subdir_to_mount + if not mount_obj.is_mounted(): + ret = mount_obj.mount() + self.assertTrue(ret, ("Unable to mount volume '%s:%s' " + "on '%s:%s'" + % (mount_obj.server_system, + mount_obj.volname, + mount_obj.client_system, + mount_obj.mountpoint))) + + ret = self.start_and_wait_for_io_to_complete() + self.assertTrue(ret, ("Failed to write IOs when sub directory is" + " mounted from client side")) + + def test_nfs_ganesha_subdirectory_mount_from_server_side(self): + """ + Tests script to verify nfs ganesha subdirectory mount from server + side succeeds and able to write IOs. 
+ """ + subdir_to_mount = self.subdir_path.replace(self.mounts[0].mountpoint, + '') + if not subdir_to_mount.startswith(os.path.sep): + subdir_to_mount = os.path.sep + subdir_to_mount + + for mount_obj in self.mounts: + mount_obj.volname = mount_obj.volname + subdir_to_mount + + export_file = ("/var/run/gluster/shared_storage/nfs-ganesha/exports/" + "export.%s.conf" % self.volname) + cmd = ("sed -i s/'Path = .*'/'Path = \"\/%s\";'/g %s" + % (re.escape(self.mounts[0].volname), export_file)) + ret, _, _ = g.run(self.mnode, cmd) + self.assertEqual(ret, 0, ("Unable to change Path info to %s in %s" + % ("/" + self.mounts[0].volname, + export_file))) + + cmd = ("sed -i 's/volume=.*/& \\n volpath=\"%s\";/g' %s" + % (re.escape(subdir_to_mount), export_file)) + ret, _, _ = g.run(self.mnode, cmd) + self.assertEqual(ret, 0, ("Unable to add volpath info to %s in %s" + % ("/" + self.mounts[0].volname, + export_file))) + + cmd = ("sed -i s/'Pseudo=.*'/'Pseudo=\"\/%s\";'/g %s" + % (re.escape(self.mounts[0].volname), export_file)) + ret, _, _ = g.run(self.mnode, cmd) + self.assertEqual(ret, 0, ("Unable to change pseudo Path info to " + "%s in %s" % ("/" + self.mounts[0].volname, + export_file))) + + # Stop and start volume to take the modified export file to effect. + # Stoping volume + ret = volume_stop(self.mnode, self.volname) + self.assertTrue(ret, ("Failed to stop volume %s" % self.volname)) + + # Waiting for few seconds for volume unexport. Max wait time is + # 120 seconds. + ret = wait_for_nfs_ganesha_volume_to_get_unexported(self.mnode, + self.volname) + self.assertTrue(ret, ("Failed to unexport volume %s after " + "stopping volume" % self.volname)) + + # Starting volume + ret = volume_start(self.mnode, self.volname) + self.assertTrue(ret, ("Failed to start volume %s" % self.volname)) + + # Waiting for few seconds for volume export. Max wait time is + # 120 seconds. + ret = wait_for_nfs_ganesha_volume_to_get_exported(self.mnode, + (self.mounts[0]. + volname)) + self.assertTrue(ret, ("Failed to export sub directory %s after " + "starting volume" % self.mounts[0].volname)) + + for mount_obj in self.mounts: + if not mount_obj.is_mounted(): + ret = mount_obj.mount() + self.assertTrue(ret, ("Unable to mount volume '%s:%s' " + "on '%s:%s'" + % (mount_obj.server_system, + mount_obj.volname, + mount_obj.client_system, + mount_obj.mountpoint))) + + ret = self.start_and_wait_for_io_to_complete() + self.assertTrue(ret, ("Failed to write IOs when sub directory is" + " mounted from server side")) + + def tearDown(self): + """setUp starts the io from all the mounts. + IO creates deep dirs and files. + """ + + # Wait for IO to complete if io validation is not executed in the + # test method + if not self.io_validation_complete: + g.log.info("Wait for IO to complete as IO validation did not " + "succeed in test method") + ret = wait_for_io_to_complete(self.all_mounts_procs, self.mounts) + if not ret: + raise ExecutionError("IO failed on some of the clients") + g.log.info("IO is successful on all mounts") + + NfsGaneshaIOBaseClass.tearDown.im_func(self) + + @classmethod + def tearDownClass(cls): + + (NfsGaneshaIOBaseClass. + tearDownClass. 
+ im_func(cls, + teardown_nfsganesha_cluster=False)) diff --git a/tests/functional/quota/__init__.py b/tests/functional/quota/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/functional/quota/test_non_existent_dir.py b/tests/functional/quota/test_non_existent_dir.py index 973e6b96e..666e75279 100644 --- a/tests/functional/quota/test_non_existent_dir.py +++ b/tests/functional/quota/test_non_existent_dir.py @@ -29,7 +29,7 @@ class QuotaNonExistentDir(GlusterBaseClass): @classmethod def setUpClass(cls): GlusterBaseClass.setUpClass.im_func(cls) - g.log.info("Starting %s " % cls.__name__) + g.log.info("Starting %s ", cls.__name__) def setUp(self): # SettingUp volume and Mounting the volume @@ -42,7 +42,7 @@ class QuotaNonExistentDir(GlusterBaseClass): if not ret: raise ExecutionError("Failed to setup and mount volume %s" % self.volname) - g.log.info("Volume %s has been setup successfully" % self.volname) + g.log.info("Volume %s has been setup successfully", self.volname) def test_non_existent_dir(self): # Displaying volume status and info @@ -64,8 +64,8 @@ class QuotaNonExistentDir(GlusterBaseClass): # Set Quota limit on the root of the volume g.log.info("Set Quota Limit on the path %s of the volume %s", path, self.volname) - ret, out, err = set_quota_limit_usage(self.mnode, self.volname, - path=path, limit="1GB") + ret, _, err = set_quota_limit_usage(self.mnode, self.volname, + path=path, limit="1GB") self.assertIn("No such file or directory", err, "Quota limit set " "on path /foo which does not exist") diff --git a/tests/functional/snapshot/test_256_snapshots.py b/tests/functional/snapshot/test_256_snapshots.py new file mode 100644 index 000000000..f3e6e4b0e --- /dev/null +++ b/tests/functional/snapshot/test_256_snapshots.py @@ -0,0 +1,172 @@ +# Copyright (C) 2016-2017 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +""" +Description : The purpose of this test is to validate create snap>256 +""" + +from glusto.core import Glusto as g +from glustolibs.gluster.exceptions import ExecutionError +from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on +from glustolibs.misc.misc_libs import upload_scripts +from glustolibs.io.utils import validate_io_procs, get_mounts_stat +from glustolibs.gluster.snap_ops import get_snap_list, snap_delete_all + + +@runs_on([['distributed', 'replicated', 'distributed-replicated', 'dispersed', + 'distributed-dispersed'], ['glusterfs', 'nfs', 'cifs']]) +class SanpCreate256(GlusterBaseClass): + """ + Test for snapshot create for max 256 + Steps : + 1. Create and start a volume + 2. Mount the volume on a client + 3. Perform some heavy IO + 4. Varify IO + 5. modify max snap limit to default to 256. + 6. Create 256 snapshots + 7. Varify 256 created sucessfully + 8. Create 257th snapshot - check for failure + -- it should fail. + 9. 
Cleanup + + """ + @classmethod + def setUpClass(cls): + GlusterBaseClass.setUpClass.im_func(cls) + + # Upload io scripts for running IO on mounts + g.log.info("Upload io scripts to clients %s for running IO on " + "mounts", cls.clients) + script_local_path = ("/usr/share/glustolibs/io/scripts/" + "file_dir_ops.py") + cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/" + "file_dir_ops.py") + ret = upload_scripts(cls.clients, script_local_path) + if not ret: + raise ExecutionError("Failed to upload IO scripts to clients %s" % + cls.clients) + g.log.info("Successfully uploaded IO scripts to clients %s", + cls.clients) + + def setUp(self): + """ + setUp method + """ + # Setup_Volume + GlusterBaseClass.setUpClass.im_func(self) + ret = self.setup_volume_and_mount_volume(mounts=self.mounts, + volume_create_force=True) + if not ret: + raise ExecutionError("Failed to setup and mount volume") + g.log.info("Volume %s has been setup successfully", self.volname) + + def tearDown(self): + """ + tearDown + """ + ret, _, _ = snap_delete_all(self.mnode) + if not ret: + raise ExecutionError("Failed to delete all snaps") + GlusterBaseClass.tearDown.im_func(self) + + @classmethod + def tearDownClass(cls): + """ + Clean up the volume & mount + """ + g.log.info("Starting volume and mount cleanup") + ret = cls.unmount_volume_and_cleanup_volume(cls.mounts) + if not ret: + raise ExecutionError("Failed to cleanup volume and mount") + g.log.info("Cleanup successfull for the volume and mount") + + GlusterBaseClass.tearDownClass.im_func(cls) + + def test_validate_snaps_256(self): + + # Start IO on all mounts. + all_mounts_procs = [] + count = 1 + for mount_obj in self.mounts: + g.log.info("Starting IO on %s:%s", mount_obj.client_system, + mount_obj.mountpoint) + cmd = ("python %s create_deep_dirs_with_files " + "--dirname-start-num %d " + "--dir-depth 2 " + "--dir-length 10 " + "--max-num-of-dirs 5 " + "--num-of-files 5 %s" % (self.script_upload_path, count, + mount_obj.mountpoint)) + proc = g.run_async(mount_obj.client_system, cmd, + user=mount_obj.user) + all_mounts_procs.append(proc) + count = count + 10 + + # Validate IO + g.log.info("Validating IO's") + ret = validate_io_procs(all_mounts_procs, self.mounts) + self.assertTrue(ret, "IO failed on some of the clients") + g.log.info("Successfully validated all io's") + + # Get stat of all the files/dirs created. + g.log.info("Get stat of all the files/dirs created.") + ret = get_mounts_stat(self.mounts) + self.assertTrue(ret, "Stat failed on some of the clients") + g.log.info("Successfully got stat of all files/dirs created") + + # set config for 256 snpas (to make sure to override) + cmd_str = ("gluster snapshot config snap-max-hard-limit 256" + " --mode=script") + ret = g.run(self.mnode, cmd_str) + self.assertTrue(ret, "Failed to set snap-max-hard-limit to 256.") + g.log.info("snap-max-hard limit successfully set for 256.") + + # Create 256 snaps + for i in range(1, 257, 1): + cmd_str = "gluster snapshot create %s %s %s" % ( + "snapy%s" % i, self.volname, "no-timestamp") + ret = g.run(self.mnode, cmd_str) + self.assertTrue(ret, ("Failed to create snapshot for %s" + % self.volname)) + g.log.info("Snapshot %s created successfully for volume %s", + "snapy%s" % i, self.volname) + + # Check for no. 
of snaps using snap_list it should be 256 + snap_list = get_snap_list(self.mnode) + self.assertTrue((len(snap_list) == 256), "No of snaps not consistent " + "for volume %s" % self.volname) + g.log.info("Successfully validated number of snaps.") + + # Validate all 256 snap names created during + for i in range(1, 257, 1): + self.assertTrue(("snapy%s" % i in snap_list), "%s snap not " + "found " % ("snapy%s" % i)) + g.log.info("Sucessfully validated names of snap") + + # Try to create 257th snapshot + cmd_str = "gluster snapshot create %s %s %s" % ("snap", self.volname, + "no-timestamp") + ret = g.run(self.mnode, cmd_str) + self.assertEqual(ret, 1, ("Unexpected: Successfully created 'snap'" + " for volume %s" % self.volname)) + g.log.info("Snapshot 'snap' not created as it is 257th snap") + + # Check for no. of snaps using snap_list it should be 256 + snap_list = get_snap_list(self.mnode) + self.assertEqual(256, len(snap_list), "No of snaps not consistent " + "for volume %s" % self.volname) + g.log.info("Successfully validated number of snaps.") diff --git a/tests/functional/snapshot/test_auto_delete.py b/tests/functional/snapshot/test_auto_delete.py index a9db5dd4f..db8a50f0e 100644 --- a/tests/functional/snapshot/test_auto_delete.py +++ b/tests/functional/snapshot/test_auto_delete.py @@ -47,7 +47,7 @@ class DeleteSnapTests(GlusterBaseClass): ret = self.setup_volume() if not ret: raise ExecutionError("Failed to setup volume %s" % self.volname) - g.log.info("Volume %s has been setup successfully" % self.volname) + g.log.info("Volume %s has been setup successfully", self.volname) # enabling auto-delete cmd = "gluster snapshot config auto-delete enable" @@ -62,7 +62,7 @@ class DeleteSnapTests(GlusterBaseClass): self.assertTrue(ret, ("Failed to set snap-max-hardlimit" "config option for volume %s" % self.volname)) g.log.info("snap-max-hardlimit config option Successfully set for" - "volume %s" % self.volname) + "volume %s", self.volname) # Validating max-hard-limit hardlimit = get_snap_config(self.mnode) @@ -107,13 +107,13 @@ class DeleteSnapTests(GlusterBaseClass): self.assertEqual(ret, 0, ("Failed to list snapshot of volume %s" % self.volname)) g.log.info("Total number of snapshots created after auto-delete" - "enabled is %s" % out) + "enabled is %s", out) if out != 8: g.log.info("Failed to validate snapshots with expected" "number of snapshots") g.log.info("Snapshot Validation Successful") - g.log.info("Snapshot list command for volume %s was successful" - % self.volname) + g.log.info("Snapshot list command for volume %s was successful", + self.volname) def tearDown(self): # Calling GlusterBaseClass tearDown @@ -130,8 +130,8 @@ class DeleteSnapTests(GlusterBaseClass): ret = snap_delete_all(self.mnode) self.assertTrue(ret, ("Failed to delete snapshot of volume" "%s" % self.volname)) - g.log.info("Successfully deleted snapshots of volume %s" - % self.volname) + g.log.info("Successfully deleted snapshots of volume %s", + self.volname) # setting back default max-soft-limit to 90% option = {'snap-max-soft-limit': '90'} @@ -144,4 +144,4 @@ class DeleteSnapTests(GlusterBaseClass): ret = self.cleanup_volume() if not ret: raise ExecutionError("Failed to Cleanup Volume") - g.log.info("Cleanup volume %s Completed Successfully" % self.volname) + g.log.info("Cleanup volume %s Completed Successfully", self.volname) diff --git a/tests/functional/snapshot/test_create_brick_down.py b/tests/functional/snapshot/test_create_brick_down.py new file mode 100644 index 000000000..529c39a3d --- /dev/null +++ 
b/tests/functional/snapshot/test_create_brick_down.py @@ -0,0 +1,145 @@ +# Copyright (C) 2016-2017 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +""" +Description: +Test Cases in this module tests for +creating snapshot when the bricks are +down. + +""" +import random +from glusto.core import Glusto as g +from glustolibs.gluster.exceptions import ExecutionError +from glustolibs.gluster.gluster_base_class import GlusterBaseClass +from glustolibs.gluster.gluster_base_class import runs_on +from glustolibs.gluster.volume_ops import get_volume_info +from glustolibs.gluster.snap_ops import snap_create, snap_list +from glustolibs.gluster.brick_libs import (get_all_bricks, + are_bricks_online, + bring_bricks_offline, + get_offline_bricks_list, + get_online_bricks_list) + + +@runs_on([['distributed-replicated', 'replicated', 'distributed'], + ['glusterfs', 'nfs', 'cifs']]) +class CreateSnapwhenBricksareDown(GlusterBaseClass): + """ + CreateSnapwhenBricksareDown contains tests + which validates creating snapshot + when the bricks are down + """ + def setUp(self): + # SetUp volume and Mount volume + GlusterBaseClass.setUpClass.im_func(self) + g.log.info("Starting to SetUp Volume") + ret = self.setup_volume_and_mount_volume(mounts=self.mounts) + if not ret: + raise ExecutionError("Failed to setup volume %s" % self.volname) + g.log.info("Volume %s has been setup successfully", self.volname) + + def test_create_snap_bricks(self): + """ + 1. get brick list + 2. check all bricks are online + 3. Selecting one brick randomly to bring it offline + 4. get brick list + 5. check all bricks are online + 6. Offline Bricks list + 7. Online Bricks list + 8. Create snapshot of volume + 9. 
snapshot create should fail + """ + + bricks_list = [] + # get the bricks from the volume + g.log.info("Fetching bricks for the volume : %s", self.volname) + bricks_list = get_all_bricks(self.mnode, self.volname) + g.log.info("Brick List : %s", bricks_list) + + # check all bricks are online + g.log.info("Verifying all bricks are online or not.....") + ret = are_bricks_online(self.mnode, self.volname, + bricks_list) + self.assertTrue(ret, ("Not all bricks are online")) + g.log.info("All bricks are online.") + + # Selecting one brick randomly to bring it offline + g.log.info("Selecting one brick randomly to bring it offline") + brick_to_bring_offline = random.choice(bricks_list) + g.log.info("Brick to bring offline:%s ", brick_to_bring_offline) + ret = bring_bricks_offline(self.volname, brick_to_bring_offline, + None) + self.assertTrue(ret, "Failed to bring the bricks offline") + g.log.info("Randomly Selected brick: %s", brick_to_bring_offline) + + # get brick list + g.log.info("Fetching bricks for the volume : %s", self.volname) + bricks_list = get_all_bricks(self.mnode, self.volname) + g.log.info("Brick List : %s", bricks_list) + + # check all bricks are online + g.log.info("Verifying all bricks are online or not.....") + ret = are_bricks_online(self.mnode, self.volname, + bricks_list) + self.assertFalse(ret, ("Not all bricks are online")) + g.log.info("All bricks are online.") + + # get the bricks for the volume + g.log.info("Fetching bricks for the volume : %s", self.volname) + bricks_list = get_all_bricks(self.mnode, self.volname) + g.log.info("Brick List : %s", bricks_list) + + # Offline Bricks list + offbricks = get_offline_bricks_list(self.mnode, self.volname) + g.log.info("Bricks Offline: %s", offbricks) + + # Online Bricks list + onbricks = get_online_bricks_list(self.mnode, self.volname) + g.log.info("Bricks Online: %s", onbricks) + + # Create snapshot of volume + ret = snap_create(self.mnode, self.volname, "snap1", + False, "Description with $p3c1al characters!") + self.assertTrue(ret, ("Failed to create snapshot snap1")) + g.log.info("Snapshot snap1 of volume %s created Successfully", + self.volname) + + # Volume status + ret = get_volume_info(self.mnode, self.volname) + self.assertTrue(ret, ("Failed to perform gluster volume" + "info on volume %s" + % self.volname)) + g.log.info("Gluster volume info on volume %s is successful", + self.volname) + # snapshot list + ret = snap_list(self.mnode) + self.assertTrue(ret, ("Failed to list snapshot of volume %s" + % self.volname)) + g.log.info("Snapshot list command for volume %s was successful", + self.volname) + + def tearDown(self): + # Calling GlusterBaseClass tearDown + GlusterBaseClass.tearDown.im_func(self) + + # Unmount and cleanup-volume + g.log.info("Starting to Unmount and cleanup-volume") + ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts) + if not ret: + raise ExecutionError("Failed to Unmount and Cleanup Volume") + g.log.info("Successful in Unmount Volume and Cleanup Volume") diff --git a/tests/functional/snapshot/test_snap_create_brickdown.py b/tests/functional/snapshot/test_snap_create_brickdown.py deleted file mode 100644 index 0f6d68dcd..000000000 --- a/tests/functional/snapshot/test_snap_create_brickdown.py +++ /dev/null @@ -1,146 +0,0 @@ -# Copyright (C) 2016-2017 Red Hat, Inc. 
-# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -""" -Description: - -Test Cases in this module tests for -creating snapshot when the bricks are -down. - -""" -import random -from glusto.core import Glusto as g -from glustolibs.gluster.exceptions import ExecutionError -from glustolibs.gluster.gluster_base_class import GlusterBaseClass -from glustolibs.gluster.gluster_base_class import runs_on -from glustolibs.gluster.volume_ops import get_volume_info -from glustolibs.gluster.snap_ops import snap_create, snap_list -from glustolibs.gluster.brick_libs import (get_all_bricks, - are_bricks_online, - bring_bricks_offline, - get_offline_bricks_list, - get_online_bricks_list) - - -@runs_on([['distributed-replicated', 'replicated', 'distributed'], - ['glusterfs', 'nfs', 'cifs']]) -class CreateSnapwhenBricksareDown(GlusterBaseClass): - """ - CreateSnapwhenBricksareDown contains tests - which validates creating snapshot - when the bricks are down - """ - def setUp(self): - # SetUp volume and Mount volume - GlusterBaseClass.setUpClass.im_func(self) - g.log.info("Starting to SetUp Volume") - ret = self.setup_volume_and_mount_volume(mounts=self.mounts) - if not ret: - raise ExecutionError("Failed to setup volume %s" % self.volname) - g.log.info("Volume %s has been setup successfully" % self.volname) - - def test_create_snap_bricks(self): - """ - 1. get brick list - 2. check all bricks are online - 3. Selecting one brick randomly to bring it offline - 4. get brick list - 5. check all bricks are online - 6. Offline Bricks list - 7. Online Bricks list - 8. Create snapshot of volume - 9. 
snapshot create should fail - """ - - bricks_list = [] - # get the bricks from the volume - g.log.info("Fetching bricks for the volume : %s" % self.volname) - bricks_list = get_all_bricks(self.mnode, self.volname) - g.log.info("Brick List : %s" % bricks_list) - - # check all bricks are online - g.log.info("Verifying all bricks are online or not.....") - ret = are_bricks_online(self.mnode, self.volname, - bricks_list) - self.assertTrue(ret, ("Not all bricks are online")) - g.log.info("All bricks are online.") - - # Selecting one brick randomly to bring it offline - g.log.info("Selecting one brick randomly to bring it offline") - brick_to_bring_offline = random.choice(bricks_list) - g.log.info("Brick to bring offline:%s " % brick_to_bring_offline) - ret = bring_bricks_offline(self.volname, brick_to_bring_offline, - None) - self.assertTrue(ret, "Failed to bring the bricks offline") - g.log.info("Randomly Selected brick: %s" % brick_to_bring_offline) - - # get brick list - g.log.info("Fetching bricks for the volume : %s" % self.volname) - bricks_list = get_all_bricks(self.mnode, self.volname) - g.log.info("Brick List : %s" % bricks_list) - - # check all bricks are online - g.log.info("Verifying all bricks are online or not.....") - ret = are_bricks_online(self.mnode, self.volname, - bricks_list) - self.assertFalse(ret, ("Not all bricks are online")) - g.log.info("All bricks are online.") - - # get the bricks for the volume - g.log.info("Fetching bricks for the volume : %s" % self.volname) - bricks_list = get_all_bricks(self.mnode, self.volname) - g.log.info("Brick List : %s" % bricks_list) - - # Offline Bricks list - offbricks = get_offline_bricks_list(self.mnode, self.volname) - g.log.info("Bricks Offline: %s" % offbricks) - - # Online Bricks list - onbricks = get_online_bricks_list(self.mnode, self.volname) - g.log.info("Bricks Online: %s" % onbricks) - - # Create snapshot of volume - ret = snap_create(self.mnode, self.volname, "snap1", - False, "Description with $p3c1al characters!") - self.assertTrue(ret, ("Failed to create snapshot snap1")) - g.log.info("Snapshot snap1 of volume %s created Successfully" - % (self.volname)) - - # Volume status - ret = get_volume_info(self.mnode, self.volname) - self.assertTrue(ret, ("Failed to perform gluster volume" - "info on volume %s" - % self.volname)) - g.log.info("Gluster volume info on volume %s is successful" - % self.volname) - # snapshot list - ret = snap_list(self.mnode) - self.assertTrue(ret, ("Failed to list snapshot of volume %s" - % self.volname)) - g.log.info("Snapshot list command for volume %s was successful" - % self.volname) - - def tearDown(self): - # Calling GlusterBaseClass tearDown - GlusterBaseClass.tearDown.im_func(self) - - # Unmount and cleanup-volume - g.log.info("Starting to Unmount and cleanup-volume") - ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts) - if not ret: - raise ExecutionError("Failed to Unmount and Cleanup Volume") - g.log.info("Successful in Unmount Volume and Cleanup Volume") diff --git a/tests/functional/snapshot/test_snapshot_create.py b/tests/functional/snapshot/test_snapshot_create.py new file mode 100644 index 000000000..078dc956f --- /dev/null +++ b/tests/functional/snapshot/test_snapshot_create.py @@ -0,0 +1,205 @@ +# Copyright (C) 2016-2017 Red Hat, Inc. 
+# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +""" +Description : The purpose of this test is to validate snapshot create + +""" + +from glusto.core import Glusto as g +from glustolibs.gluster.exceptions import ExecutionError +from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on +from glustolibs.misc.misc_libs import upload_scripts +from glustolibs.io.utils import validate_io_procs, get_mounts_stat +from glustolibs.gluster.snap_ops import (get_snap_list, snap_delete_all) + + +@runs_on([['replicated', 'distributed-replicated', 'dispersed', + 'distributed-dispersed'], + ['glusterfs', 'nfs', 'cifs']]) +class SnapCreate(GlusterBaseClass): + """ + Test for snapshot create + Steps : + 1. Create and start a volume + 2. Create a snapshot of volume using + -- gluster snapshot create + 3. Create snapshot of volume using + -- gluster snapshot create [description + ] + 4. Create one more snapshot of volume using + -- gluster snapshot create force + 5. Create one snapshot with option no-timestamp + 6. Mount the volume on a client + 7. Perform some heavy IO + 8. While files and directory creation is in progress, + create multiple gluster snapshots + 9. Do a snapshot list to see if all the snapshots are present + 10. Do a snapshot info to see all the snapshots information + 11. Verify that the IO is not hindered + 12. Arequal all the bricks in the snap volume + 13. 
Cleanup + + """ + @classmethod + def setUpClass(cls): + GlusterBaseClass.setUpClass.im_func(cls) + + # Upload io scripts for running IO on mounts + g.log.info("Upload io scripts to clients %s for running IO on " + "mounts", cls.clients) + script_local_path = ("/usr/share/glustolibs/io/scripts/" + "file_dir_ops.py") + cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/" + "file_dir_ops.py") + ret = upload_scripts(cls.clients, script_local_path) + if not ret: + raise ExecutionError("Failed to upload IO scripts to clients %s" % + cls.clients) + g.log.info("Successfully uploaded IO scripts to clients %s", + cls.clients) + + def setUp(self): + """ + setUp method + """ + # Setup_Volume + GlusterBaseClass.setUpClass.im_func(self) + ret = self.setup_volume_and_mount_volume(mounts=self.mounts, + volume_create_force=True) + if not ret: + raise ExecutionError("Failed to setup and mount volume") + g.log.info("Volume %s has been setup successfully", self.volname) + + def tearDown(self): + """ + tearDown + """ + ret, _, _ = snap_delete_all(self.mnode) + if not ret: + raise ExecutionError("Failed to delete all snaps") + GlusterBaseClass.tearDown.im_func(self) + + @classmethod + def tearDownClass(cls): + """ + Clean up the volume & mount + """ + g.log.info("Starting volume and mount cleanup") + ret = cls.unmount_volume_and_cleanup_volume(cls.mounts) + if not ret: + raise ExecutionError("Failed to cleanup volume and mount") + g.log.info("Cleanup successfull for the volume and mount") + + GlusterBaseClass.tearDownClass.im_func(cls) + + def test_validate_snaps_create(self): + """ + Creating snapshot using gluster snapshot create + """ + cmd_str = "gluster snapshot create %s %s" % ("snap1", self.volname) + ret = g.run(self.mnode, cmd_str) + self.assertTrue(ret, ("Failed to create snapshot for %s" + % self.volname)) + g.log.info("Snapshot snap1 created successfully for volume %s", + self.volname) + + # Create snapshot of volume using + # -- gluster snapshot create [description + # ] + desc = 'description this is a snap with "snap2" name and description' + cmd_str = ("gluster snapshot create %s %s %s" + % ("snap2", self.volname, desc)) + ret = g.run(self.mnode, cmd_str) + self.assertTrue(ret, ("Failed to create snapshot for %s" + % self.volname)) + g.log.info("Snapshot snap2 created successfully for volume %s", + (self.volname)) + + # Create one more snapshot of volume using force + cmd_str = ("gluster snapshot create %s %s %s" + % ("snap3", self.volname, "force")) + ret = g.run(self.mnode, cmd_str) + self.assertTrue(ret, ("Failed to create snapshot for %s" + % self.volname)) + g.log.info("Snapshot snap3 created successfully for volume %s", + (self.volname)) + + # Create one more snapshot of volume using no-timestamp option + cmd_str = ("gluster snapshot create %s %s %s" + % ("snap4", self.volname, "no-timestamp")) + ret = g.run(self.mnode, cmd_str) + self.assertTrue(ret, ("Failed to create snapshot for %s" + % self.volname)) + g.log.info("Snapshot snap4 created successfully for volume %s", + (self.volname)) + + # Delete all snaps + ret, _, _ = snap_delete_all(self.mnode) + self.assertEqual(ret, 0, "Snapshot delete failed.") + g.log.info("Successfully deleted all snaps") + + # Start IO on all mounts. 
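The snap1 through snap4 creations above shell out with g.run and assert on the raw return value. A sketch of explicit exit-status handling is below; it assumes glusto's g.run returns an (rc, stdout, stderr) tuple, in line with the `ret, _, _ = snap_delete_all(self.mnode)` unpacking used elsewhere in this patch, and mnode/volname/snapname are placeholder arguments rather than names from the patch.

from glusto.core import Glusto as g


def create_snap_via_cli(mnode, volname, snapname, extra_args=""):
    """Run `gluster snapshot create` and fail loudly on a non-zero exit code."""
    cmd = ("gluster snapshot create %s %s %s"
           % (snapname, volname, extra_args))
    rc, _, err = g.run(mnode, cmd)
    if rc != 0:
        raise RuntimeError("snapshot create failed (rc=%d): %s" % (rc, err))

Checking rc directly distinguishes "command ran and failed" from "command ran", which a truthiness check on the returned tuple cannot.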
+ all_mounts_procs = [] + count = 1 + for mount_obj in self.mounts: + g.log.info("Starting IO on %s:%s", mount_obj.client_system, + mount_obj.mountpoint) + cmd = ("python %s create_deep_dirs_with_files " + "--dirname-start-num %d " + "--dir-depth 2 " + "--dir-length 10 " + "--max-num-of-dirs 5 " + "--num-of-files 5 %s" % (self.script_upload_path, count, + mount_obj.mountpoint)) + proc = g.run_async(mount_obj.client_system, cmd, + user=mount_obj.user) + all_mounts_procs.append(proc) + count = count + 10 + + # Create 5 snaps while IO is in progress + for i in range(0, 5): + cmd_str = "gluster snapshot create %s %s %s" % ( + "snapy%s" % i, self.volname, "no-timestamp") + ret = g.run(self.mnode, cmd_str) + self.assertTrue(ret, ("Failed to create snapshot for %s" + % self.volname)) + g.log.info("Snapshot %s created successfully for volume %s", + "snapy%s" % i, self.volname) + + # Validate IO + g.log.info("Validating IO's") + ret = validate_io_procs(all_mounts_procs, self.mounts) + self.assertTrue(ret, "IO failed on some of the clients") + g.log.info("Successfully validated all io's") + + # Get stat of all the files/dirs created. + g.log.info("Get stat of all the files/dirs created.") + ret = get_mounts_stat(self.mounts) + self.assertTrue(ret, "Stat failed on some of the clients") + g.log.info("Successfully got stat of all files/dirs created") + + # Check for no of snaps using snap_list it should be 5 now + snap_list = get_snap_list(self.mnode) + self.assertEqual(5, len(snap_list), "No of snaps not consistent " + "for volume %s" % self.volname) + g.log.info("Successfully validated number of snaps.") + + # Validate all snaps created during IO + for i in range(0, 5): + self.assertIn("snapy%s" % i, snap_list, "%s snap not " + "found " % ("snapy%s" % i)) + g.log.info("Sucessfully validated names of snap") diff --git a/tests/functional/snapshot/test_snapshot_restore.py b/tests/functional/snapshot/test_snapshot_restore.py new file mode 100644 index 000000000..cbec67e04 --- /dev/null +++ b/tests/functional/snapshot/test_snapshot_restore.py @@ -0,0 +1,279 @@ +# Copyright (C) 2016-2017 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +""" +The purpose of this test is to validate restore of a snapshot. 
+""" + +from glusto.core import Glusto as g +from glustolibs.gluster.exceptions import ExecutionError +from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on +from glustolibs.misc.misc_libs import upload_scripts +from glustolibs.gluster.brick_libs import get_all_bricks +from glustolibs.gluster.volume_libs import ( + shrink_volume, verify_all_process_of_volume_are_online) +from glustolibs.gluster.volume_ops import volume_reset +from glustolibs.io.utils import validate_io_procs, get_mounts_stat +from glustolibs.gluster.snap_ops import (snap_create, + snap_delete_all, + snap_restore_complete, + set_snap_config, + get_snap_config) + + +@runs_on([['distributed-replicated', 'distributed-dispersed'], + ['glusterfs']]) +class SnapRestore(GlusterBaseClass): + """ + Test for snapshot restore + Steps : + 1. Create and start a volume + 2. Mount the volume on a client + 3. Create data on the volume (v1) + 4. Set some volume option + 5. Take snapshot of volume + 6. Create some more data on volume (v2) + 7. Reset volume option + 8. Remove brick/bricks + 9. Stop volume + 10. Restore snapshot + 11. Start and mount volume + 12. Validate data on volume (v1) + 13. Validate volume option + 14. Validate bricks after restore + 15. Create snapshot of restored volume + 16. Cleanup + + """ + @classmethod + def setUpClass(cls): + GlusterBaseClass.setUpClass.im_func(cls) + + # Upload io scripts for running IO on mounts + g.log.info("Upload io scripts to clients %s for running IO on " + "mounts", cls.clients) + script_local_path = ("/usr/share/glustolibs/io/scripts/" + "file_dir_ops.py") + cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/" + "file_dir_ops.py") + ret = upload_scripts(cls.clients, script_local_path) + if not ret: + raise ExecutionError("Failed to upload IO scripts to clients %s" % + cls.clients) + g.log.info("Successfully uploaded IO scripts to clients %s", + cls.clients) + + def setUp(self): + """ + setUp method + """ + # Setup_Volume + GlusterBaseClass.setUpClass.im_func(self) + ret = self.setup_volume_and_mount_volume(mounts=self.mounts, + volume_create_force=True) + if not ret: + raise ExecutionError("Failed to setup and mount volume") + g.log.info("Volume %s has been setup successfully", self.volname) + + def tearDown(self): + """ + tearDown + """ + ret, _, _ = snap_delete_all(self.mnode) + if not ret: + raise ExecutionError("Snapshot delete failed.") + GlusterBaseClass.tearDown.im_func(self) + + @classmethod + def tearDownClass(cls): + """ + Clean up the volume & mount + """ + g.log.info("Starting volume and mount cleanup") + ret = cls.unmount_volume_and_cleanup_volume(cls.mounts) + if not ret: + raise ExecutionError("Failed to cleanup volume and mount") + g.log.info("Cleanup successfull for the volume and mount") + + GlusterBaseClass.tearDownClass.im_func(cls) + + def test_validate_snaps_restore(self): + # pylint: disable=too-many-statements + # Start IO on all mounts. 
+ all_mounts_procs = [] + count = 1 + for mount_obj in self.mounts: + g.log.info("Starting IO on %s:%s", mount_obj.client_system, + mount_obj.mountpoint) + cmd = ("python %s create_deep_dirs_with_files " + "--dirname-start-num %d " + "--dir-depth 2 " + "--dir-length 10 " + "--max-num-of-dirs 5 " + "--num-of-files 5 %s" % (self.script_upload_path, count, + mount_obj.mountpoint)) + proc = g.run_async(mount_obj.client_system, cmd, + user=mount_obj.user) + all_mounts_procs.append(proc) + count = count + 10 + + # Validate IO + g.log.info("Validating IO's") + ret = validate_io_procs(all_mounts_procs, self.mounts) + self.assertTrue(ret, "IO failed on some of the clients") + g.log.info("Successfully validated all io's") + + # Get stat of all the files/dirs created. + g.log.info("Get stat of all the files/dirs created.") + ret = get_mounts_stat(self.mounts) + self.assertTrue(ret, "Stat failed on some of the clients") + g.log.info("Successfully got stat of all files/dirs created") + + # Setting some volume option related to snapshot + option_before_restore = {'volumeConfig': + [{'softLimit': '100', + 'effectiveHardLimit': '200', + 'hardLimit': '256'}], + 'systemConfig': + {'softLimit': '90%', + 'activateOnCreate': 'disable', + 'hardLimit': '256', + 'autoDelete': 'disable'}} + ret = set_snap_config(self.mnode, option_before_restore) + self.assertTrue(ret, ("Failed to set vol option on %s" + % self.volname)) + g.log.info("Volume options for%s is set successfully", self.volname) + + # Get brick list befor taking snap_restore + bricks_before_snap_restore = get_all_bricks(self.mnode, self.volname) + g.log.info("Brick List before snap restore " + "volume: %s", bricks_before_snap_restore) + + # Creating snapshot + ret = snap_create(self.mnode, self.volname, "snap1") + self.assertTrue(ret, ("Failed to create snapshot for %s" + % self.volname)) + g.log.info("Snapshot snap1 created successfully for volume %s", + self.volname) + + # Again start IO on all mounts. + all_mounts_procs = [] + count = 1000 + for mount_obj in self.mounts: + g.log.info("Starting IO on %s:%s", mount_obj.client_system, + mount_obj.mountpoint) + cmd = ("python %s create_deep_dirs_with_files " + "--dirname-start-num %d " + "--dir-depth 2 " + "--dir-length 10 " + "--max-num-of-dirs 5 " + "--num-of-files 5 %s" % (self.script_upload_path, count, + mount_obj.mountpoint)) + proc = g.run_async(mount_obj.client_system, cmd, + user=mount_obj.user) + all_mounts_procs.append(proc) + count = count + 10 + + # Validate IO + g.log.info("Validating IO's") + ret = validate_io_procs(all_mounts_procs, self.mounts) + self.assertTrue(ret, "IO failed on some of the clients") + g.log.info("Successfully validated all io's") + + # Get stat of all the files/dirs created. 
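The "start deep-dir IO on every mount" loop above recurs several times in this patch; a small sketch of a shared helper is shown below. It assumes mount objects expose client_system, mountpoint and user exactly as the loops above use them, and script_path is the uploaded file_dir_ops.py path; it is an illustration, not part of the patch.

from glusto.core import Glusto as g


def start_deep_dir_io(mounts, script_path, start_num=1):
    """Kick off file_dir_ops.py asynchronously on every mount."""
    procs = []
    count = start_num
    for mount_obj in mounts:
        cmd = ("python %s create_deep_dirs_with_files "
               "--dirname-start-num %d --dir-depth 2 --dir-length 10 "
               "--max-num-of-dirs 5 --num-of-files 5 %s"
               % (script_path, count, mount_obj.mountpoint))
        procs.append(g.run_async(mount_obj.client_system, cmd,
                                 user=mount_obj.user))
        count += 10
    return procs

The returned list can then be handed straight to validate_io_procs(procs, mounts), just as the test does.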
+ g.log.info("Get stat of all the files/dirs created.") + ret = get_mounts_stat(self.mounts) + self.assertTrue(ret, "Stat failed on some of the clients") + g.log.info("Successfully got stat of all files/dirs created") + + # Reset volume to make sure volume options will reset + ret = volume_reset(self.mnode, self.volname, force=False) + self.assertTrue(ret, ("Failed to reset %s" % self.volname)) + g.log.info("Reset Volume %s is Successful", self.volname) + + # Removing one brick + g.log.info("Starting volume shrink") + ret = shrink_volume(self.mnode, self.volname, force=True) + self.assertTrue(ret, ("Failed to shrink the volume on " + "volume %s", self.volname)) + g.log.info("Shrinking volume is successful on " + "volume %s", self.volname) + + # Restore snapshot + ret = snap_restore_complete(self.mnode, self.volname, "snap1") + self.assertTrue(ret, ("Failed to restore snap snap1 on the " + "volume %s", self.volname)) + g.log.info("Restore of volume is successful from snap1 on " + "volume %s", self.volname) + + # Validate volume is up and running + g.log.info("Verifying volume is up and process are online") + ret = verify_all_process_of_volume_are_online(self.mnode, self.volname) + self.assertTrue(ret, ("Volume %s : All process are not online", + self.volname)) + g.log.info("Volume %s : All process are online", self.volname) + + # Get volume options post restore + option_after_restore = get_snap_config(self.mnode) + # Compare volume options + self.assertNotEqual(option_before_restore, option_after_restore, + "Volume Options are not same after snap restore") + + # Get brick list post restore + bricks_after_snap_restore = get_all_bricks(self.mnode, self.volname) + g.log.info("Brick List after snap restore " + "volume: %s", bricks_after_snap_restore) + # Compare brick_list + self.assertNotEqual(bricks_before_snap_restore, + bricks_after_snap_restore, + "Bricks are not same after snap restore") + + # Creating snapshot + ret = snap_create(self.mnode, self.volname, "snap2") + self.assertTrue(ret, ("Failed to create snapshot for %s" + % self.volname)) + g.log.info("Snapshot snap2 created successfully for volume %s", + self.volname) + + # Again start IO on all mounts after restore + all_mounts_procs = [] + count = 1000 + for mount_obj in self.mounts: + g.log.info("Starting IO on %s:%s", mount_obj.client_system, + mount_obj.mountpoint) + cmd = ("python %s create_deep_dirs_with_files " + "--dirname-start-num %d " + "--dir-depth 2 " + "--dir-length 10 " + "--max-num-of-dirs 5 " + "--num-of-files 5 %s" % (self.script_upload_path, count, + mount_obj.mountpoint)) + proc = g.run_async(mount_obj.client_system, cmd, + user=mount_obj.user) + all_mounts_procs.append(proc) + count = count + 10 + + # Validate IO + g.log.info("Validating IO's") + ret = validate_io_procs(all_mounts_procs, self.mounts) + self.assertTrue(ret, "IO failed on some of the clients") + g.log.info("Successfully validated all io's") + + # Get stat of all the files/dirs created. + g.log.info("Get stat of all the files/dirs created.") + ret = get_mounts_stat(self.mounts) + self.assertTrue(ret, "Stat failed on some of the clients") + g.log.info("Successfully got stat of all files/dirs created") diff --git a/tests/functional/snapshot/test_validate_snapshot_256.py b/tests/functional/snapshot/test_validate_snapshot_256.py deleted file mode 100644 index a52f2baf6..000000000 --- a/tests/functional/snapshot/test_validate_snapshot_256.py +++ /dev/null @@ -1,174 +0,0 @@ -# Copyright (C) 2016-2017 Red Hat, Inc. 
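The test above checks that both the snapshot configuration and the brick list differ after shrink and restore. A sketch of that before/after comparison, not taken from the patch, is below; it assumes get_snap_config and get_all_bricks return comparable structures, as their use in this test suggests, and mnode/volname are placeholder arguments.

from glustolibs.gluster.brick_libs import get_all_bricks
from glustolibs.gluster.snap_ops import get_snap_config


def capture_volume_state(mnode, volname):
    """Record snapshot config and brick layout for a before/after comparison."""
    return {'snap_config': get_snap_config(mnode),
            'bricks': get_all_bricks(mnode, volname)}

# Usage: before = capture_volume_state(mnode, volname)
#        ... shrink the volume and restore the snapshot ...
#        assert capture_volume_state(mnode, volname) != before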
-# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -""" -Description : The purpose of this test is to validate create snap>256 - -""" - -from glusto.core import Glusto as g -from glustolibs.gluster.exceptions import ExecutionError -from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on -from glustolibs.misc.misc_libs import upload_scripts -from glustolibs.io.utils import validate_io_procs, get_mounts_stat -from glustolibs.gluster.snap_ops import get_snap_list, snap_delete_all - - -@runs_on([['distributed'], ['replicated', 'distributed-replicated', - 'dispersed', 'distributed-dispersed'], - ['glusterfs', 'nfs', 'cifs']]) -class SanpCreate256(GlusterBaseClass): - """ - Test for snapshot create for max 256 - Steps : - 1. Create and start a volume - 2. Mount the volume on a client - 3. Perform some heavy IO - 4. Varify IO - 5. modify max snap limit to default to 256. - 6. Create 256 snapshots - 7. Varify 256 created sucessfully - 8. Create 257th snapshot - check for failure - -- it should fail. - 9. Cleanup - - """ - @classmethod - def setUpClass(cls): - GlusterBaseClass.setUpClass.im_func(cls) - - # Upload io scripts for running IO on mounts - g.log.info("Upload io scripts to clients %s for running IO on " - "mounts", cls.clients) - script_local_path = ("/usr/share/glustolibs/io/scripts/" - "file_dir_ops.py") - cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/" - "file_dir_ops.py") - ret = upload_scripts(cls.clients, script_local_path) - if not ret: - raise ExecutionError("Failed to upload IO scripts to clients %s", - cls.clients) - g.log.info("Successfully uploaded IO scripts to clients %s", - cls.clients) - - def setUp(self): - """ - setUp method - """ - # Setup_Volume - GlusterBaseClass.setUpClass.im_func(self) - ret = self.setup_volume_and_mount_volume(mounts=self.mounts, - volume_create_force=True) - if not ret: - raise ExecutionError("Failed to setup and mount volume") - g.log.info("Volume %s has been setup successfully" % self.volname) - - def tearDown(self): - """ - tearDown - """ - ret, _, _ = snap_delete_all(self.mnode) - if not ret: - raise ExecutionError("Failed to delete all snaps") - GlusterBaseClass.tearDown.im_func(self) - - @classmethod - def tearDownClass(cls): - """ - Clean up the volume & mount - """ - g.log.info("Starting volume and mount cleanup") - ret = cls.unmount_volume_and_cleanup_volume(cls.mounts) - if not ret: - raise ExecutionError("Failed to cleanup volume and mount") - g.log.info("Cleanup successfull for the volume and mount") - - GlusterBaseClass.tearDownClass.im_func(cls) - - def test_validate_snaps_256(self): - - # Start IO on all mounts. 
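For the limit check described in the step list above (256 snapshots succeed, the 257th must fail), a hedged sketch follows. It again assumes g.run returns an (rc, stdout, stderr) tuple, mnode/volname/limit are placeholder arguments, and the snapshot name "overlimit" is arbitrary; none of these names come from the patch.

from glusto.core import Glusto as g
from glustolibs.gluster.snap_ops import get_snap_list


def assert_snap_limit_enforced(mnode, volname, limit=256):
    """Check the snap count equals the hard limit and one more create fails."""
    snaps = get_snap_list(mnode)
    if len(snaps) != limit:
        raise RuntimeError("expected %d snapshots, found %d"
                           % (limit, len(snaps)))
    # 'overlimit' is an arbitrary snapshot name used only for this probe.
    rc, _, _ = g.run(mnode, "gluster snapshot create overlimit %s no-timestamp"
                     % volname)
    if rc == 0:
        raise RuntimeError("snapshot beyond the hard limit was created")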
- all_mounts_procs = [] - count = 1 - for mount_obj in self.mounts: - g.log.info("Starting IO on %s:%s", mount_obj.client_system, - mount_obj.mountpoint) - cmd = ("python %s create_deep_dirs_with_files " - "--dirname-start-num %d " - "--dir-depth 2 " - "--dir-length 10 " - "--max-num-of-dirs 5 " - "--num-of-files 5 %s" % (self.script_upload_path, count, - mount_obj.mountpoint)) - proc = g.run_async(mount_obj.client_system, cmd, - user=mount_obj.user) - all_mounts_procs.append(proc) - count = count + 10 - - # Validate IO - g.log.info("Validating IO's") - ret = validate_io_procs(all_mounts_procs, self.mounts) - self.assertTrue(ret, "IO failed on some of the clients") - g.log.info("Successfully validated all io's") - - # Get stat of all the files/dirs created. - g.log.info("Get stat of all the files/dirs created.") - ret = get_mounts_stat(self.mounts) - self.assertTrue(ret, "Stat failed on some of the clients") - g.log.info("Successfully got stat of all files/dirs created") - - # set config for 256 snpas (to make sure to override) - cmd_str = ("gluster snapshot config snap-max-hard-limit 256" - " --mode=script") - ret = g.run(self.mnode, cmd_str) - self.assertTrue(ret, "Failed to set snap-max-hard-limit to 256.") - g.log.info("snap-max-hard limit successfully set for 256.") - - # Create 256 snaps - for i in range(1, 257, 1): - cmd_str = "gluster snapshot create %s %s %s" % ( - "snapy%s" % i, self.volname, "no-timestamp") - ret = g.run(self.mnode, cmd_str) - self.assertTrue(ret, ("Failed to create snapshot for %s" - % self.volname)) - g.log.info("Snapshot %s created successfully for volume %s" - % ("snapy%s" % i, self.volname)) - - # Check for no. of snaps using snap_list it should be 256 - snap_list = get_snap_list(self.mnode) - self.assertTrue((len(snap_list) == 256), "No of snaps not consistent " - "for volume %s" % self.volname) - g.log.info("Successfully validated number of snaps.") - - # Validate all 256 snap names created during - for i in range(1, 257, 1): - self.assertTrue(("snapy%s" % i in snap_list), "%s snap not " - "found " % ("snapy%s" % i)) - g.log.info("Sucessfully validated names of snap") - - # Try to create 257th snapshot - cmd_str = "gluster snapshot create %s %s %s" % ("snap", self.volname, - "no-timestamp") - ret = g.run(self.mnode, cmd_str) - self.assertEqual(ret, 1, ("Unexpected: Successfully created 'snap'" - " for volume %s" % self.volname)) - g.log.info("Snapshot 'snap' not created as it is 257th snap") - - # Check for no. of snaps using snap_list it should be 256 - snap_list = get_snap_list(self.mnode) - self.assertEqual(256, len(snap_list), "No of snaps not consistent " - "for volume %s" % self.volname) - g.log.info("Successfully validated number of snaps.") diff --git a/tests/functional/snapshot/test_validate_snapshot_create.py b/tests/functional/snapshot/test_validate_snapshot_create.py deleted file mode 100644 index f5e3297d3..000000000 --- a/tests/functional/snapshot/test_validate_snapshot_create.py +++ /dev/null @@ -1,204 +0,0 @@ -# Copyright (C) 2016-2017 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -""" -Description : The purpose of this test is to validate snapshot create - -""" - -from glusto.core import Glusto as g -from glustolibs.gluster.exceptions import ExecutionError -from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on -from glustolibs.misc.misc_libs import upload_scripts -from glustolibs.io.utils import validate_io_procs, get_mounts_stat -from glustolibs.gluster.snap_ops import (get_snap_list, snap_delete_all) - - -@runs_on([['replicated', 'distributed-replicated', 'dispersed', - 'distributed-dispersed'], - ['glusterfs', 'nfs', 'cifs']]) -class SnapCreate(GlusterBaseClass): - """ - Test for snapshot create - Steps : - 1. Create and start a volume - 2. Create a snapshot of volume using - -- gluster snapshot create - 3. Create snapshot of volume using - -- gluster snapshot create [description - ] - 4. Create one more snapshot of volume using - -- gluster snapshot create force - 5. Create one snapshot with option no-timestamp - 6. Mount the volume on a client - 7. Perform some heavy IO - 8. While files and directory creation is in progress, - create multiple gluster snapshots - 9. Do a snapshot list to see if all the snapshots are present - 10. Do a snapshot info to see all the snapshots information - 11. Verify that the IO is not hindered - 12. Arequal all the bricks in the snap volume - 13. Cleanup - - """ - @classmethod - def setUpClass(cls): - GlusterBaseClass.setUpClass.im_func(cls) - - # Upload io scripts for running IO on mounts - g.log.info("Upload io scripts to clients %s for running IO on " - "mounts", cls.clients) - script_local_path = ("/usr/share/glustolibs/io/scripts/" - "file_dir_ops.py") - cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/" - "file_dir_ops.py") - ret = upload_scripts(cls.clients, script_local_path) - if not ret: - raise ExecutionError("Failed to upload IO scripts to clients %s", - cls.clients) - g.log.info("Successfully uploaded IO scripts to clients %s", - cls.clients) - - def setUp(self): - """ - setUp method - """ - # Setup_Volume - GlusterBaseClass.setUpClass.im_func(self) - ret = self.setup_volume_and_mount_volume(mounts=self.mounts, - volume_create_force=True) - if not ret: - raise ExecutionError("Failed to setup and mount volume") - g.log.info("Volume %s has been setup successfully" % self.volname) - - def tearDown(self): - """ - tearDown - """ - ret, _, _ = snap_delete_all(self.mnode) - if not ret: - raise ExecutionError("Failed to delete all snaps") - GlusterBaseClass.tearDown.im_func(self) - - @classmethod - def tearDownClass(cls): - """ - Clean up the volume & mount - """ - g.log.info("Starting volume and mount cleanup") - ret = cls.unmount_volume_and_cleanup_volume(cls.mounts) - if not ret: - raise ExecutionError("Failed to cleanup volume and mount") - g.log.info("Cleanup successfull for the volume and mount") - - GlusterBaseClass.tearDownClass.im_func(cls) - - def test_validate_snaps_create(self): - # Creating snapshot using gluster snapshot create - cmd_str = "gluster snapshot create %s %s" % ("snap1", self.volname) - ret = g.run(self.mnode, cmd_str) - self.assertTrue(ret, ("Failed to create snapshot for %s" - % self.volname)) - g.log.info("Snapshot snap1 created successfully for volume %s" - % (self.volname)) - - """ Create snapshot of volume using - -- gluster snapshot create 
[description - ] - """ - desc = 'description this is a snap with "snap2" name and description' - cmd_str = ("gluster snapshot create %s %s %s" - % ("snap2", self.volname, desc)) - ret = g.run(self.mnode, cmd_str) - self.assertTrue(ret, ("Failed to create snapshot for %s" - % self.volname)) - g.log.info("Snapshot snap2 created successfully for volume %s" - % (self.volname)) - - # Create one more snapshot of volume using force - cmd_str = ("gluster snapshot create %s %s %s" - % ("snap3", self.volname, "force")) - ret = g.run(self.mnode, cmd_str) - self.assertTrue(ret, ("Failed to create snapshot for %s" - % self.volname)) - g.log.info("Snapshot snap3 created successfully for volume %s" - % (self.volname)) - - # Create one more snapshot of volume using no-timestamp option - cmd_str = ("gluster snapshot create %s %s %s" - % ("snap4", self.volname, "no-timestamp")) - ret = g.run(self.mnode, cmd_str) - self.assertTrue(ret, ("Failed to create snapshot for %s" - % self.volname)) - g.log.info("Snapshot snap4 created successfully for volume %s" - % (self.volname)) - - # Delete all snaps - ret, _, _ = snap_delete_all(self.mnode) - self.assertEqual(ret, 0, "Snapshot delete failed.") - g.log.info("Successfully deleted all snaps") - - # Start IO on all mounts. - all_mounts_procs = [] - count = 1 - for mount_obj in self.mounts: - g.log.info("Starting IO on %s:%s", mount_obj.client_system, - mount_obj.mountpoint) - cmd = ("python %s create_deep_dirs_with_files " - "--dirname-start-num %d " - "--dir-depth 2 " - "--dir-length 10 " - "--max-num-of-dirs 5 " - "--num-of-files 5 %s" % (self.script_upload_path, count, - mount_obj.mountpoint)) - proc = g.run_async(mount_obj.client_system, cmd, - user=mount_obj.user) - all_mounts_procs.append(proc) - count = count + 10 - - # Create 5 snaps while IO is in progress - for i in range(0, 5): - cmd_str = "gluster snapshot create %s %s %s" % ( - "snapy%s" % i, self.volname, "no-timestamp") - ret = g.run(self.mnode, cmd_str) - self.assertTrue(ret, ("Failed to create snapshot for %s" - % self.volname)) - g.log.info("Snapshot %s created successfully for volume %s" - % ("snapy%s" % i, self.volname)) - - # Validate IO - g.log.info("Validating IO's") - ret = validate_io_procs(all_mounts_procs, self.mounts) - self.assertTrue(ret, "IO failed on some of the clients") - g.log.info("Successfully validated all io's") - - # Get stat of all the files/dirs created. - g.log.info("Get stat of all the files/dirs created.") - ret = get_mounts_stat(self.mounts) - self.assertTrue(ret, "Stat failed on some of the clients") - g.log.info("Successfully got stat of all files/dirs created") - - # Check for no of snaps using snap_list it should be 5 now - snap_list = get_snap_list(self.mnode) - self.assertEqual(5, len(snap_list), "No of snaps not consistent " - "for volume %s" % self.volname) - g.log.info("Successfully validated number of snaps.") - - # Validate all snaps created during IO - for i in range(0, 5): - self.assertIn("snapy%s" % i, snap_list, "%s snap not " - "found " % ("snapy%s" % i)) - g.log.info("Sucessfully validated names of snap") diff --git a/tests/functional/snapshot/test_validate_snapshot_restore.py b/tests/functional/snapshot/test_validate_snapshot_restore.py deleted file mode 100644 index 5ac45182e..000000000 --- a/tests/functional/snapshot/test_validate_snapshot_restore.py +++ /dev/null @@ -1,279 +0,0 @@ -# Copyright (C) 2016-2017 Red Hat, Inc. 
-# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -""" -Description : The purpose of this test is to validate restore of a snapshot. - -""" - -from glusto.core import Glusto as g -from glustolibs.gluster.exceptions import ExecutionError -from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on -from glustolibs.misc.misc_libs import upload_scripts -from glustolibs.gluster.brick_libs import get_all_bricks -from glustolibs.gluster.volume_libs import ( - shrink_volume, verify_all_process_of_volume_are_online) -from glustolibs.gluster.volume_ops import volume_reset -from glustolibs.io.utils import validate_io_procs, get_mounts_stat -from glustolibs.gluster.snap_ops import (snap_create, - snap_delete_all, - snap_restore_complete, - set_snap_config, - get_snap_config) - - -@runs_on([['distributed-replicated', 'distributed-dispersed'], - ['glusterfs']]) -class SnapRestore(GlusterBaseClass): - """ - Test for snapshot restore - Steps : - 1. Create and start a volume - 2. Mount the volume on a client - 3. Create data on the volume (v1) - 4. Set some volume option - 5. Take snapshot of volume - 6. Create some more data on volume (v2) - 7. Reset volume option - 8. Remove brick/bricks - 9. Stop volume - 10. Restore snapshot - 11. Start and mount volume - 12. Validate data on volume (v1) - 13. Validate volume option - 14. Validate bricks after restore - 15. Create snapshot of restored volume - 16. 
Cleanup - - """ - @classmethod - def setUpClass(cls): - GlusterBaseClass.setUpClass.im_func(cls) - - # Upload io scripts for running IO on mounts - g.log.info("Upload io scripts to clients %s for running IO on " - "mounts", cls.clients) - script_local_path = ("/usr/share/glustolibs/io/scripts/" - "file_dir_ops.py") - cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/" - "file_dir_ops.py") - ret = upload_scripts(cls.clients, script_local_path) - if not ret: - raise ExecutionError("Failed to upload IO scripts to clients %s", - cls.clients) - g.log.info("Successfully uploaded IO scripts to clients %s", - cls.clients) - - def setUp(self): - """ - setUp method - """ - # Setup_Volume - GlusterBaseClass.setUpClass.im_func(self) - ret = self.setup_volume_and_mount_volume(mounts=self.mounts, - volume_create_force=True) - if not ret: - raise ExecutionError("Failed to setup and mount volume") - g.log.info("Volume %s has been setup successfully" % self.volname) - - def tearDown(self): - """ - tearDown - """ - ret, _, _ = snap_delete_all(self.mnode) - if not ret: - raise ExecutionError("Snapshot delete failed.") - GlusterBaseClass.tearDown.im_func(self) - - @classmethod - def tearDownClass(cls): - """ - Clean up the volume & mount - """ - g.log.info("Starting volume and mount cleanup") - ret = cls.unmount_volume_and_cleanup_volume(cls.mounts) - if not ret: - raise ExecutionError("Failed to cleanup volume and mount") - g.log.info("Cleanup successfull for the volume and mount") - - GlusterBaseClass.tearDownClass.im_func(cls) - - def test_validate_snaps_restore(self): - # Start IO on all mounts. - all_mounts_procs = [] - count = 1 - for mount_obj in self.mounts: - g.log.info("Starting IO on %s:%s", mount_obj.client_system, - mount_obj.mountpoint) - cmd = ("python %s create_deep_dirs_with_files " - "--dirname-start-num %d " - "--dir-depth 2 " - "--dir-length 10 " - "--max-num-of-dirs 5 " - "--num-of-files 5 %s" % (self.script_upload_path, count, - mount_obj.mountpoint)) - proc = g.run_async(mount_obj.client_system, cmd, - user=mount_obj.user) - all_mounts_procs.append(proc) - count = count + 10 - - # Validate IO - g.log.info("Validating IO's") - ret = validate_io_procs(all_mounts_procs, self.mounts) - self.assertTrue(ret, "IO failed on some of the clients") - g.log.info("Successfully validated all io's") - - # Get stat of all the files/dirs created. 
- g.log.info("Get stat of all the files/dirs created.") - ret = get_mounts_stat(self.mounts) - self.assertTrue(ret, "Stat failed on some of the clients") - g.log.info("Successfully got stat of all files/dirs created") - - # Setting some volume option related to snapshot - option_before_restore = {'volumeConfig': - [{'softLimit': '100', - 'effectiveHardLimit': '200', - 'hardLimit': '256'}], - 'systemConfig': - {'softLimit': '90%', - 'activateOnCreate': 'disable', - 'hardLimit': '256', - 'autoDelete': 'disable'}} - ret = set_snap_config(self.mnode, option_before_restore) - self.assertTrue(ret, ("Failed to set vol option on %s" - % self.volname)) - g.log.info("Volume options for%s is set successfully" % self.volname) - - # Get brick list befor taking snap_restore - bricks_before_snap_restore = get_all_bricks(self.mnode, self.volname) - g.log.info("Brick List before snap restore " - "volume: %s" % bricks_before_snap_restore) - - # Creating snapshot - ret = snap_create(self.mnode, self.volname, "snap1") - self.assertTrue(ret, ("Failed to create snapshot for %s" - % self.volname)) - g.log.info("Snapshot snap1 created successfully for volume %s" - % (self.volname)) - - # Again start IO on all mounts. - all_mounts_procs = [] - count = 1000 - for mount_obj in self.mounts: - g.log.info("Starting IO on %s:%s", mount_obj.client_system, - mount_obj.mountpoint) - cmd = ("python %s create_deep_dirs_with_files " - "--dirname-start-num %d " - "--dir-depth 2 " - "--dir-length 10 " - "--max-num-of-dirs 5 " - "--num-of-files 5 %s" % (self.script_upload_path, count, - mount_obj.mountpoint)) - proc = g.run_async(mount_obj.client_system, cmd, - user=mount_obj.user) - all_mounts_procs.append(proc) - count = count + 10 - - # Validate IO - g.log.info("Validating IO's") - ret = validate_io_procs(all_mounts_procs, self.mounts) - self.assertTrue(ret, "IO failed on some of the clients") - g.log.info("Successfully validated all io's") - - # Get stat of all the files/dirs created. 
- g.log.info("Get stat of all the files/dirs created.") - ret = get_mounts_stat(self.mounts) - self.assertTrue(ret, "Stat failed on some of the clients") - g.log.info("Successfully got stat of all files/dirs created") - - # Reset volume to make sure volume options will reset - ret = volume_reset(self.mnode, self.volname, force=False) - self.assertTrue(ret, ("Failed to reset %s" % self.volname)) - g.log.info("Reset Volume %s is Successful" % self.volname) - - # Removing one brick - g.log.info("Starting volume shrink") - ret = shrink_volume(self.mnode, self.volname, force=True) - self.assertTrue(ret, ("Failed to shrink the volume on " - "volume %s", self.volname)) - g.log.info("Shrinking volume is successful on " - "volume %s", self.volname) - - # Restore snapshot - ret = snap_restore_complete(self.mnode, self.volname, "snap1") - self.assertTrue(ret, ("Failed to restore snap snap1 on the " - "volume %s", self.volname)) - g.log.info("Restore of volume is successful from snap1 on " - "volume %s", self.volname) - - # Validate volume is up and running - g.log.info("Verifying volume is up and process are online") - ret = verify_all_process_of_volume_are_online(self.mnode, self.volname) - self.assertTrue(ret, ("Volume %s : All process are not online", - self.volname)) - g.log.info("Volume %s : All process are online", self.volname) - - # Get volume options post restore - option_after_restore = get_snap_config(self.mnode) - # Compare volume options - self.assertNotEqual(option_before_restore, option_after_restore, - "Volume Options are not same after snap restore") - - # Get brick list post restore - bricks_after_snap_restore = get_all_bricks(self.mnode, self.volname) - g.log.info("Brick List after snap restore " - "volume: %s" % bricks_after_snap_restore) - # Compare brick_list - self.assertNotEqual(bricks_before_snap_restore, - bricks_after_snap_restore, - "Bricks are not same after snap restore") - - # Creating snapshot - ret = snap_create(self.mnode, self.volname, "snap2") - self.assertTrue(ret, ("Failed to create snapshot for %s" - % self.volname)) - g.log.info("Snapshot snap2 created successfully for volume %s" - % (self.volname)) - - # Again start IO on all mounts after restore - all_mounts_procs = [] - count = 1000 - for mount_obj in self.mounts: - g.log.info("Starting IO on %s:%s", mount_obj.client_system, - mount_obj.mountpoint) - cmd = ("python %s create_deep_dirs_with_files " - "--dirname-start-num %d " - "--dir-depth 2 " - "--dir-length 10 " - "--max-num-of-dirs 5 " - "--num-of-files 5 %s" % (self.script_upload_path, count, - mount_obj.mountpoint)) - proc = g.run_async(mount_obj.client_system, cmd, - user=mount_obj.user) - all_mounts_procs.append(proc) - count = count + 10 - - # Validate IO - g.log.info("Validating IO's") - ret = validate_io_procs(all_mounts_procs, self.mounts) - self.assertTrue(ret, "IO failed on some of the clients") - g.log.info("Successfully validated all io's") - - # Get stat of all the files/dirs created. - g.log.info("Get stat of all the files/dirs created.") - ret = get_mounts_stat(self.mounts) - self.assertTrue(ret, "Stat failed on some of the clients") - g.log.info("Successfully got stat of all files/dirs created") -- cgit