From fb5145be2db1a7c96b008af8a40e3b7b18df9673 Mon Sep 17 00:00:00 2001
From: Nigel Babu
Date: Mon, 5 Mar 2018 15:49:23 +0530
Subject: Fix up coding style issues in tests

Change-Id: I14609030983d4485dbce5a4ffed1e0353e3d1bc7
---
 tests/functional/afr/heal/__init__.py              |   0
 .../heal/test_heal_info_while_accessing_file.py    | 228 +++++++
 tests/functional/afr/heal/test_self_heal.py        | 150 +++--
 .../afr/heal/test_self_heal_daemon_process.py      | 645 ++++++++++++++++++++
 tests/functional/afr/test_client_side_quorum.py    | 158 +++--
 .../afr/test_heal_info_while_accessing_file.py     | 230 -------
 .../afr/test_self_heal_daemon_process.py           | 663 ---------------------
 7 files changed, 1015 insertions(+), 1059 deletions(-)
 create mode 100644 tests/functional/afr/heal/__init__.py
 create mode 100644 tests/functional/afr/heal/test_heal_info_while_accessing_file.py
 mode change 100755 => 100644 tests/functional/afr/heal/test_self_heal.py
 create mode 100644 tests/functional/afr/heal/test_self_heal_daemon_process.py
 delete mode 100644 tests/functional/afr/test_heal_info_while_accessing_file.py
 delete mode 100644 tests/functional/afr/test_self_heal_daemon_process.py

diff --git a/tests/functional/afr/heal/__init__.py b/tests/functional/afr/heal/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/functional/afr/heal/test_heal_info_while_accessing_file.py b/tests/functional/afr/heal/test_heal_info_while_accessing_file.py
new file mode 100644
index 000000000..965adbdc1
--- /dev/null
+++ b/tests/functional/afr/heal/test_heal_info_while_accessing_file.py
@@ -0,0 +1,228 @@
+# Copyright (C) 2015-2016 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ +from glusto.core import Glusto as g +from glustolibs.gluster.gluster_base_class import (GlusterBaseClass, runs_on) +from glustolibs.gluster.exceptions import ExecutionError +from glustolibs.gluster.volume_libs import get_subvols +from glustolibs.gluster.brick_libs import (bring_bricks_offline, + are_bricks_offline, + get_all_bricks) + +from glustolibs.gluster.heal_ops import get_heal_info_summary +from glustolibs.misc.misc_libs import upload_scripts +from glustolibs.io.utils import (validate_io_procs, + list_all_files_and_dirs_mounts, + wait_for_io_to_complete) + + +@runs_on([['distributed-replicated'], + ['glusterfs']]) +class TestSelfHeal(GlusterBaseClass): + """ + Description: + Test cases related to + healing in default configuration of the volume + """ + + @classmethod + def setUpClass(cls): + # Calling GlusterBaseClass setUpClass + GlusterBaseClass.setUpClass.im_func(cls) + + # Upload io scripts for running IO on mounts + g.log.info("Upload io scripts to clients %s for running IO on mounts", + cls.clients) + script_local_path = ("/usr/share/glustolibs/io/scripts/" + "file_dir_ops.py") + cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/" + "file_dir_ops.py") + ret = upload_scripts(cls.clients, [script_local_path]) + if not ret: + raise ExecutionError("Failed to upload IO scripts to clients %s" + % cls.clients) + g.log.info("Successfully uploaded IO scripts to clients %s", + cls.clients) + + cls.counter = 1 + # int: Value of counter is used for dirname-start-num argument for + # file_dir_ops.py create_deep_dirs_with_files. + + # The --dir-length argument value for file_dir_ops.py + # create_deep_dirs_with_files is set to 10 (refer to the cmd in setUp + # method). This means every mount will create + # 10 top level dirs. For every mountpoint/testcase to create new set of + # dirs, we are incrementing the counter by --dir-length value i.e 10 in + # this test suite. + + # If we are changing the --dir-length to new value, ensure the counter + # is also incremented by same value to create new set of files/dirs. 
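+        #
+        # Illustrative example (not executed by this test): with
+        # --dir-length 10, the first testcase on a mount would pass
+        # --dirname-start-num 1 and the next one --dirname-start-num 11,
+        # so each run creates a fresh, non-overlapping set of dirs.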
+ + def setUp(self): + # Calling GlusterBaseClass setUp + GlusterBaseClass.setUp.im_func(self) + + self.all_mounts_procs = [] + self.io_validation_complete = False + + # Setup Volume and Mount Volume + g.log.info("Starting to Setup Volume and Mount Volume") + ret = self.setup_volume_and_mount_volume(mounts=self.mounts, + volume_create_force=False) + if not ret: + raise ExecutionError("Failed to Setup_Volume and Mount_Volume") + g.log.info("Successful in Setup Volume and Mount Volume") + + self.bricks_list = get_all_bricks(self.mnode, self.volname) + + def tearDown(self): + """ + If test method failed before validating IO, tearDown waits for the + IO's to complete and checks for the IO exit status + + Cleanup and umount volume + """ + if not self.io_validation_complete: + g.log.info("Wait for IO to complete as IO validation did not " + "succeed in test method") + ret = wait_for_io_to_complete(self.all_mounts_procs, self.mounts) + if not ret: + raise ExecutionError("IO failed on some of the clients") + g.log.info("IO is successful on all mounts") + + # List all files and dirs created + g.log.info("List all files and directories:") + ret = list_all_files_and_dirs_mounts(self.mounts) + if not ret: + raise ExecutionError("Failed to list all files and dirs") + g.log.info("Listing all files and directories is successful") + + # Cleanup and umount volume + g.log.info("Starting to Unmount Volume and Cleanup Volume") + ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts) + if not ret: + raise ExecutionError("Failed to umount the vol & cleanup Volume") + g.log.info("Successful in umounting the volume and Cleanup") + + # Calling GlusterBaseClass teardown + GlusterBaseClass.tearDown.im_func(self) + + def test_heal_info_shouldnot_list_files_being_accessed(self): + """ + - bring brick 1 offline + - create files and validate IO + - get entries before accessing file + - get first filename from active subvol without offline bricks + - access and modify the file + - while accessing - get entries + - Compare entries before accessing and while accessing + - validate IO + """ + + # Bring 1-st brick offline + brick_to_bring_offline = [self.bricks_list[0]] + g.log.info('Bringing bricks %s offline...', brick_to_bring_offline) + ret = bring_bricks_offline(self.volname, brick_to_bring_offline) + self.assertTrue(ret, 'Failed to bring bricks %s offline' + % brick_to_bring_offline) + + ret = are_bricks_offline(self.mnode, self.volname, + brick_to_bring_offline) + self.assertTrue(ret, 'Bricks %s are not offline' + % brick_to_bring_offline) + g.log.info('Bringing bricks %s offline is successful', + brick_to_bring_offline) + + # Creating files on client side + for mount_obj in self.mounts: + g.log.info("Generating data for %s:%s", + mount_obj.client_system, mount_obj.mountpoint) + + # Creating files + cmd = ("python %s create_files -f 100 %s" + % (self.script_upload_path, mount_obj.mountpoint)) + + proc = g.run_async(mount_obj.client_system, cmd, + user=mount_obj.user) + self.all_mounts_procs.append(proc) + + # Validate IO + g.log.info("Wait for IO to complete and validate IO ...") + ret = validate_io_procs(self.all_mounts_procs, self.mounts) + self.io_validation_complete = True + self.assertTrue(ret, "IO failed on some of the clients") + g.log.info("IO is successful on all mounts") + + # Get entries before accessing file + g.log.info("Getting entries_before_accessing file...") + entries_before_accessing = get_heal_info_summary( + self.mnode, self.volname) + self.assertNotEqual(entries_before_accessing, None, 
+                            "Can't get heal info summary")
+        g.log.info(
+            "Getting entries_before_accessing file finished successfully")
+
+        # Get filename to access from active subvol without offline bricks
+        # Get last subvol
+        subvols = get_subvols(self.mnode, self.volname)
+        subvol_without_offline_brick = subvols['volume_subvols'][-1]
+
+        # Get first brick server and brick path
+        # and get first file from filelist
+        subvol_mnode, mnode_brick = subvol_without_offline_brick[0].split(':')
+        ret, file_list, _ = g.run(subvol_mnode, 'ls %s' % mnode_brick)
+        file_to_edit = file_list.splitlines()[0]
+
+        # Access and modify the file
+        g.log.info("Start modifying IO on all mounts...")
+        self.all_mounts_procs = []
+        for mount_obj in self.mounts:
+            g.log.info("Modifying IO on %s:%s", mount_obj.client_system,
+                       mount_obj.mountpoint)
+
+            cmd = ("cd %s/ ; "
+                   "dd if=/dev/zero of=%s bs=1G count=1"
+                   % (mount_obj.mountpoint, file_to_edit))
+            proc = g.run_async(mount_obj.client_system, cmd,
+                               user=mount_obj.user)
+            self.all_mounts_procs.append(proc)
+            g.log.info("IO on %s:%s is modified successfully",
+                       mount_obj.client_system, mount_obj.mountpoint)
+        self.io_validation_complete = False
+
+        # Get entries while accessing file
+        g.log.info("Getting entries while accessing file...")
+        entries_while_accessing = get_heal_info_summary(
+            self.mnode, self.volname)
+        self.assertNotEqual(entries_while_accessing, None,
+                            "Can't get heal info summary")
+        g.log.info("Getting entries while accessing file "
+                   "finished successfully")
+
+        # Compare dicts before accessing and while accessing
+        g.log.info('Comparing entries before modifying and while modifying...')
+        ret = cmp(entries_before_accessing, entries_while_accessing)
+        self.assertEqual(ret, 0, 'Entries before modifying and while '
+                         'modifying are not equal')
+        g.log.info('Comparison of entries before modifying and while '
+                   'modifying finished successfully.')
+
+        # Validate IO
+        g.log.info("Wait for IO to complete and validate IO ...")
+        ret = validate_io_procs(self.all_mounts_procs, self.mounts)
+        self.assertTrue(ret, "IO failed on some of the clients")
+        self.io_validation_complete = True
+        g.log.info("IO is successful on all mounts")
diff --git a/tests/functional/afr/heal/test_self_heal.py b/tests/functional/afr/heal/test_self_heal.py
old mode 100755
new mode 100644
index 7837d958c..b2e52e392
--- a/tests/functional/afr/heal/test_self_heal.py
+++ b/tests/functional/afr/heal/test_self_heal.py
@@ -55,8 +55,8 @@ class TestSelfHeal(GlusterBaseClass):
         GlusterBaseClass.setUpClass.im_func(cls)
 
         # Upload io scripts for running IO on mounts
-        g.log.info("Upload io scripts to clients %s for running IO on mounts"
-                   % cls.clients)
+        g.log.info("Upload io scripts to clients %s for running IO on mounts",
+                   cls.clients)
         script_local_path = ("/usr/share/glustolibs/io/scripts/"
                              "file_dir_ops.py")
         cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
@@ -65,23 +65,22 @@ class TestSelfHeal(GlusterBaseClass):
         if not ret:
             raise ExecutionError("Failed to upload IO scripts to clients %s"
                                  % cls.clients)
-        g.log.info("Successfully uploaded IO scripts to clients %s"
-                   % cls.clients)
+        g.log.info("Successfully uploaded IO scripts to clients %s",
+                   cls.clients)
 
         cls.counter = 1
-        """int: Value of counter is used for dirname-start-num argument for
-        file_dir_ops.py create_deep_dirs_with_files.
-
-        The --dir-length argument value for
-        file_dir_ops.py create_deep_dirs_with_files is set to 10
-        (refer to the cmd in setUp method). This means every mount will create
-        10 top level dirs.
For every mountpoint/testcase to create new set of - dirs, we are incrementing the counter by --dir-length value i.e 10 - in this test suite. - - If we are changing the --dir-length to new value, ensure the counter - is also incremented by same value to create new set of files/dirs. - """ + # int: Value of counter is used for dirname-start-num argument for + # file_dir_ops.py create_deep_dirs_with_files. + + # The --dir-length argument value for file_dir_ops.py + # create_deep_dirs_with_files is set to 10 (refer to the cmd in setUp + # method). This means every mount will create + # 10 top level dirs. For every mountpoint/testcase to create new set of + # dirs, we are incrementing the counter by --dir-length value i.e 10 + # in this test suite. + + # If we are changing the --dir-length to new value, ensure the counter + # is also incremented by same value to create new set of files/dirs. def setUp(self): # Calling GlusterBaseClass setUp @@ -129,13 +128,6 @@ class TestSelfHeal(GlusterBaseClass): # Calling GlusterBaseClass teardown GlusterBaseClass.tearDown.im_func(self) - @classmethod - def tearDownClass(cls): - """tearDownClass. This will be executed once per class. - """ - # Calling GlusterBaseClass tearDownClass. - GlusterBaseClass.tearDownClass.im_func(cls) - def test_data_self_heal_daemon_off(self): """ Test Data-Self-Heal (heal command) @@ -166,22 +158,22 @@ class TestSelfHeal(GlusterBaseClass): in cycle - validate IO """ + # pylint: disable=too-many-statements # Setting options g.log.info('Setting options...') options = {"metadata-self-heal": "off", "entry-self-heal": "off", - "data-self-heal": "off", - } + "data-self-heal": "off"} ret = set_volume_options(self.mnode, self.volname, options) self.assertTrue(ret, 'Failed to set options %s' % options) - g.log.info("Successfully set %s for volume %s" - % (options, self.volname)) + g.log.info("Successfully set %s for volume %s", + options, self.volname) # Creating files on client side for mount_obj in self.mounts: - g.log.info("Generating data for %s:%s" - % (mount_obj.client_system, mount_obj.mountpoint)) + g.log.info("Generating data for %s:%s", + mount_obj.client_system, mount_obj.mountpoint) # Create files g.log.info('Creating files...') command = ("python %s create_files -f 100 --fixed-file-size 1k %s" @@ -217,12 +209,12 @@ class TestSelfHeal(GlusterBaseClass): bricks_to_bring_offline_dict = (select_bricks_to_bring_offline( self.mnode, self.volname)) bricks_to_bring_offline = filter(None, ( - bricks_to_bring_offline_dict['hot_tier_bricks'] + - bricks_to_bring_offline_dict['cold_tier_bricks'] + - bricks_to_bring_offline_dict['volume_bricks'])) + bricks_to_bring_offline_dict['hot_tier_bricks'] + + bricks_to_bring_offline_dict['cold_tier_bricks'] + + bricks_to_bring_offline_dict['volume_bricks'])) # Bring brick offline - g.log.info('Bringing bricks %s offline...' 
% bricks_to_bring_offline) + g.log.info('Bringing bricks %s offline...', bricks_to_bring_offline) ret = bring_bricks_offline(self.volname, bricks_to_bring_offline) self.assertTrue(ret, 'Failed to bring bricks %s offline' % bricks_to_bring_offline) @@ -231,8 +223,8 @@ class TestSelfHeal(GlusterBaseClass): bricks_to_bring_offline) self.assertTrue(ret, 'Bricks %s are not offline' % bricks_to_bring_offline) - g.log.info('Bringing bricks %s offline is successful' - % bricks_to_bring_offline) + g.log.info('Bringing bricks %s offline is successful', + bricks_to_bring_offline) # Get areequal after getting bricks offline g.log.info('Getting areequal after getting bricks offline...') @@ -252,8 +244,8 @@ class TestSelfHeal(GlusterBaseClass): # Modify the data self.all_mounts_procs = [] for mount_obj in self.mounts: - g.log.info("Modifying data for %s:%s" % - (mount_obj.client_system, mount_obj.mountpoint)) + g.log.info("Modifying data for %s:%s", mount_obj.client_system, + mount_obj.mountpoint) # Create files g.log.info('Creating files...') command = ("python %s create_files -f 100 --fixed-file-size 10k %s" @@ -272,13 +264,13 @@ class TestSelfHeal(GlusterBaseClass): g.log.info("IO is successful on all mounts") # Bring brick online - g.log.info('Bringing bricks %s online...' % bricks_to_bring_offline) + g.log.info('Bringing bricks %s online...', bricks_to_bring_offline) ret = bring_bricks_online(self.mnode, self.volname, bricks_to_bring_offline) self.assertTrue(ret, 'Failed to bring bricks %s online' % bricks_to_bring_offline) - g.log.info('Bringing bricks %s online is successful' - % bricks_to_bring_offline) + g.log.info('Bringing bricks %s online is successful', + bricks_to_bring_offline) # Setting options g.log.info('Setting options...') @@ -300,7 +292,7 @@ class TestSelfHeal(GlusterBaseClass): ret = verify_all_process_of_volume_are_online(self.mnode, self.volname) self.assertTrue(ret, ("Volume %s : All process are not online" % self.volname)) - g.log.info("Volume %s : All process are online" % self.volname) + g.log.info("Volume %s : All process are online", self.volname) # Wait for self-heal-daemons to be online g.log.info("Waiting for self-heal-daemons to be online") @@ -333,10 +325,10 @@ class TestSelfHeal(GlusterBaseClass): self.all_servers_info) self.assertTrue(ret, ("Failed to expand the volume %s", self.volname)) g.log.info("Expanding volume is successful on " - "volume %s" % self.volname) + "volume %s", self.volname) # Do rebalance - ret, out, err = rebalance_start(self.mnode, self.volname) + ret, _, _ = rebalance_start(self.mnode, self.volname) self.assertEqual(ret, 0, 'Failed to start rebalance') g.log.info('Rebalance is started') @@ -347,8 +339,8 @@ class TestSelfHeal(GlusterBaseClass): # Create 1k files self.all_mounts_procs = [] for mount_obj in self.mounts: - g.log.info("Modifying data for %s:%s" % - (mount_obj.client_system, mount_obj.mountpoint)) + g.log.info("Modifying data for %s:%s", mount_obj.client_system, + mount_obj.mountpoint) # Create files g.log.info('Creating files...') command = ("python %s create_files -f 1000 %s" @@ -363,7 +355,7 @@ class TestSelfHeal(GlusterBaseClass): bricks_list = get_all_bricks(self.mnode, self.volname) for brick in bricks_list: # Bring brick offline - g.log.info('Bringing bricks %s offline' % brick) + g.log.info('Bringing bricks %s offline', brick) ret = bring_bricks_offline(self.volname, [brick]) self.assertTrue(ret, 'Failed to bring bricks %s offline' % brick) @@ -371,17 +363,17 @@ class TestSelfHeal(GlusterBaseClass): [brick]) 
self.assertTrue(ret, 'Bricks %s are not offline' % brick) - g.log.info('Bringing bricks %s offline is successful' - % bricks_to_bring_offline) + g.log.info('Bringing bricks %s offline is successful', + bricks_to_bring_offline) # Bring brick online - g.log.info('Bringing bricks %s online...' % brick) + g.log.info('Bringing bricks %s online...', brick) ret = bring_bricks_online(self.mnode, self.volname, [brick]) self.assertTrue(ret, 'Failed to bring bricks %s online' % bricks_to_bring_offline) - g.log.info('Bringing bricks %s online is successful' - % bricks_to_bring_offline) + g.log.info('Bringing bricks %s online is successful', + bricks_to_bring_offline) # Wait for volume processes to be online g.log.info("Wait for volume processes to be online") @@ -398,7 +390,7 @@ class TestSelfHeal(GlusterBaseClass): self.volname) self.assertTrue(ret, ("Volume %s : All process are not online" % self.volname)) - g.log.info("Volume %s : All process are online" % self.volname) + g.log.info("Volume %s : All process are online", self.volname) # Wait for self-heal-daemons to be online g.log.info("Waiting for self-heal-daemons to be online") @@ -442,13 +434,13 @@ class TestSelfHeal(GlusterBaseClass): - get areequal after getting bricks online and compare with arequal before bringing bricks online """ + # pylint: disable=too-many-statements # Setting options g.log.info('Setting options...') options = {"metadata-self-heal": "off", "entry-self-heal": "off", - "data-self-heal": "off", - } + "data-self-heal": "off"} ret = set_volume_options(self.mnode, self.volname, options) self.assertTrue(ret, 'Failed to set options %s' % options) g.log.info("Options " @@ -461,9 +453,8 @@ class TestSelfHeal(GlusterBaseClass): g.log.info("Starting IO on all mounts...") self.all_mounts_procs = [] for mount_obj in self.mounts: - g.log.info("Starting IO on %s:%s" - % (mount_obj.client_system, - mount_obj.mountpoint)) + g.log.info("Starting IO on %s:%s", mount_obj.client_system, + mount_obj.mountpoint) cmd = ("python %s create_deep_dirs_with_files " "--dirname-start-num %d " "--dir-length 2 " @@ -476,9 +467,8 @@ class TestSelfHeal(GlusterBaseClass): user=mount_obj.user) self.all_mounts_procs.append(proc) self.counter = self.counter + 10 - g.log.info("IO on %s:%s is started successfully" - % (mount_obj.client_system, - mount_obj.mountpoint)) + g.log.info("IO on %s:%s is started successfully", + mount_obj.client_system, mount_obj.mountpoint) self.io_validation_complete = False # Validate IO @@ -493,8 +483,7 @@ class TestSelfHeal(GlusterBaseClass): cmd_list = ["python %s create_files -f 20 %s", "python %s mv -i '.trashcan' %s", "python %s copy --dest-dir new_dir %s", - "python %s delete %s", - ] + "python %s delete %s"] for cmd in cmd_list: # Get areequal before getting bricks offline @@ -506,8 +495,7 @@ class TestSelfHeal(GlusterBaseClass): # Setting options g.log.info('Setting options...') - options = {"self-heal-daemon": "off", - } + options = {"self-heal-daemon": "off"} ret = set_volume_options(self.mnode, self.volname, options) self.assertTrue(ret, 'Failed to set options %s' % options) g.log.info("Option 'self-heal-daemon' " @@ -517,13 +505,13 @@ class TestSelfHeal(GlusterBaseClass): bricks_to_bring_offline_dict = (select_bricks_to_bring_offline( self.mnode, self.volname)) bricks_to_bring_offline = filter(None, ( - bricks_to_bring_offline_dict['hot_tier_bricks'] + - bricks_to_bring_offline_dict['cold_tier_bricks'] + - bricks_to_bring_offline_dict['volume_bricks'])) + bricks_to_bring_offline_dict['hot_tier_bricks'] + + 
bricks_to_bring_offline_dict['cold_tier_bricks'] + + bricks_to_bring_offline_dict['volume_bricks'])) # Bring brick offline - g.log.info('Bringing bricks %s offline...' - % bricks_to_bring_offline) + g.log.info('Bringing bricks %s offline...', + bricks_to_bring_offline) ret = bring_bricks_offline(self.volname, bricks_to_bring_offline) self.assertTrue(ret, 'Failed to bring bricks %s offline' % bricks_to_bring_offline) @@ -532,8 +520,8 @@ class TestSelfHeal(GlusterBaseClass): bricks_to_bring_offline) self.assertTrue(ret, 'Bricks %s are not offline' % bricks_to_bring_offline) - g.log.info('Bringing bricks %s offline is successful' - % bricks_to_bring_offline) + g.log.info('Bringing bricks %s offline is successful', + bricks_to_bring_offline) # Get areequal after getting bricks offline g.log.info('Getting areequal after getting bricks offline...') @@ -559,9 +547,8 @@ class TestSelfHeal(GlusterBaseClass): proc = g.run_async(mount_obj.client_system, cmd, user=mount_obj.user) self.all_mounts_procs.append(proc) - g.log.info("IO on %s:%s is modified successfully" - % (mount_obj.client_system, - mount_obj.mountpoint)) + g.log.info("IO on %s:%s is modified successfully", + mount_obj.client_system, mount_obj.mountpoint) self.io_validation_complete = False # Validate IO @@ -586,19 +573,18 @@ class TestSelfHeal(GlusterBaseClass): g.log.info("Listing all files and directories is successful") # Bring brick online - g.log.info('Bringing bricks %s online...' - % bricks_to_bring_offline) + g.log.info('Bringing bricks %s online...', + bricks_to_bring_offline) ret = bring_bricks_online(self.mnode, self.volname, bricks_to_bring_offline) self.assertTrue(ret, 'Failed to bring bricks %s online' % bricks_to_bring_offline) - g.log.info('Bringing bricks %s online is successful' - % bricks_to_bring_offline) + g.log.info('Bringing bricks %s online is successful', + bricks_to_bring_offline) # Setting options g.log.info('Setting options...') - options = {"self-heal-daemon": "on", - } + options = {"self-heal-daemon": "on"} ret = set_volume_options(self.mnode, self.volname, options) self.assertTrue(ret, 'Failed to set options %s' % options) g.log.info("Option 'self-heal-daemon' is set to 'on' successfully") @@ -618,7 +604,7 @@ class TestSelfHeal(GlusterBaseClass): self.volname) self.assertTrue(ret, ("Volume %s : All process are not online" % self.volname)) - g.log.info("Volume %s : All process are online" % self.volname) + g.log.info("Volume %s : All process are online", self.volname) # Wait for self-heal-daemons to be online g.log.info("Waiting for self-heal-daemons to be online") diff --git a/tests/functional/afr/heal/test_self_heal_daemon_process.py b/tests/functional/afr/heal/test_self_heal_daemon_process.py new file mode 100644 index 000000000..3412c1b49 --- /dev/null +++ b/tests/functional/afr/heal/test_self_heal_daemon_process.py @@ -0,0 +1,645 @@ +# Copyright (C) 2016-2017 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +""" Description: + Test Cases in this module tests the self heal daemon process. +""" + +from glusto.core import Glusto as g +from glustolibs.gluster.exceptions import ExecutionError +from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on +from glustolibs.gluster.volume_libs import ( + expand_volume, shrink_volume, log_volume_info_and_status, + wait_for_volume_process_to_be_online) +from glustolibs.gluster.rebalance_ops import (rebalance_start, + wait_for_rebalance_to_complete, + rebalance_status) +from glustolibs.gluster.brick_libs import (get_all_bricks, + bring_bricks_offline, + bring_bricks_online, + are_bricks_online, + select_bricks_to_bring_offline) +from glustolibs.gluster.brick_ops import replace_brick +from glustolibs.gluster.heal_libs import (get_self_heal_daemon_pid, + do_bricks_exist_in_shd_volfile, + is_shd_daemonized, + are_all_self_heal_daemons_are_online) +from glustolibs.gluster.volume_ops import (volume_stop, volume_start) +from glustolibs.gluster.gluster_init import restart_glusterd + + +@runs_on([['replicated', 'distributed-replicated', 'dispersed', + 'distributed-dispersed'], ['glusterfs', 'nfs', 'cifs']]) +class SelfHealDaemonProcessTests(GlusterBaseClass): + """ + SelfHealDaemonProcessTests contains tests which verifies the + self-heal daemon process of the nodes + """ + def setUp(self): + """ + setup volume, mount volume and initialize necessary variables + which is used in tests + """ + + # calling GlusterBaseClass setUpClass + GlusterBaseClass.setUp.im_func(self) + + # Setup Volume and Mount Volume + g.log.info("Starting to Setup Volume and Mount Volume") + ret = self.setup_volume_and_mount_volume(mounts=self.mounts) + if not ret: + raise ExecutionError("Failed to Setup_Volume and Mount_Volume") + g.log.info("Successful in Setup Volume and Mount Volume") + + # Verfiy glustershd process releases its parent process + ret = is_shd_daemonized(self.servers) + if not ret: + raise ExecutionError("Self Heal Daemon process was still" + " holding parent process.") + g.log.info("Self Heal Daemon processes are online") + + self.glustershd = "/var/lib/glusterd/glustershd/glustershd-server.vol" + + def tearDown(self): + """ + Clean up the volume and umount volume from client + """ + + # stopping the volume + g.log.info("Starting to Unmount Volume and Cleanup Volume") + ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts) + if not ret: + raise ExecutionError("Failed to Unmount Volume and Cleanup Volume") + g.log.info("Successful in Unmount Volume and Cleanup Volume") + + # calling GlusterBaseClass tearDownClass + GlusterBaseClass.tearDown.im_func(self) + + def test_glustershd_with_add_remove_brick(self): + """ + Test script to verify glustershd process with adding and + removing bricks + + * check glustershd process - only 1 glustershd process should + be running + * bricks must be present in glustershd-server.vol file for + the replicated involved volumes + * Add bricks + * check glustershd process - only 1 glustershd process should + be running and its should be different from previous one + * bricks which are added must present in glustershd-server.vol file + * remove bricks + * check glustershd process - only 1 glustershd process should + be running and its different from previous one + * bricks which are removed should not 
present + in glustershd-server.vol file + + """ + # pylint: disable=too-many-statements + nodes = self.volume['servers'] + bricks_list = [] + glustershd_pids = {} + + # check the self-heal daemon process + g.log.info("Starting to get self-heal daemon process on " + "nodes %s", nodes) + ret, pids = get_self_heal_daemon_pid(nodes) + self.assertTrue(ret, ("Either No self heal daemon process found or " + "more than One self heal daemon process " + "found : %s", pids)) + g.log.info("Successful in getting Single self heal daemon process" + " on all nodes %s", nodes) + glustershd_pids = pids + + # get the bricks for the volume + g.log.info("Fetching bricks for the volume : %s", self.volname) + bricks_list = get_all_bricks(self.mnode, self.volname) + g.log.info("Brick List : %s", bricks_list) + + # validate the bricks present in volume info with + # glustershd server volume file + g.log.info("Starting parsing file %s on " + "node %s", self.glustershd, self.mnode) + ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname, + bricks_list) + self.assertTrue(ret, ("Brick List from volume info is different " + "from glustershd server volume file. " + "Please check log file for details")) + g.log.info("Successfully parsed %s file", self.glustershd) + + # expanding volume + g.log.info("Start adding bricks to volume %s", self.volname) + ret = expand_volume(self.mnode, self.volname, self.servers, + self.all_servers_info) + self.assertTrue(ret, ("Failed to add bricks to " + "volume %s " % self.volname)) + g.log.info("Add brick successfull") + + # Log Volume Info and Status after expanding the volume + g.log.info("Logging volume info and Status after expanding volume") + ret = log_volume_info_and_status(self.mnode, self.volname) + self.assertTrue(ret, ("Logging volume info and status failed " + "on volume %s", self.volname)) + g.log.info("Successful in logging volume info and status " + "of volume %s", self.volname) + + # Verify volume's all process are online for 60 sec + g.log.info("Verifying volume's all process are online") + ret = wait_for_volume_process_to_be_online(self.mnode, self.volname, + 60) + self.assertTrue(ret, ("Volume %s : All process are not " + "online", self.volname)) + g.log.info("Successfully Verified volume %s processes are online", + self.volname) + + # Start Rebalance + g.log.info("Starting Rebalance on the volume") + ret, _, err = rebalance_start(self.mnode, self.volname) + self.assertEqual(ret, 0, ("Failed to start rebalance on " + "the volume %s with error %s" % + (self.volname, err))) + g.log.info("Successfully started rebalance on the " + "volume %s", self.volname) + + # Log Rebalance status + g.log.info("Log Rebalance status") + _, _, _ = rebalance_status(self.mnode, self.volname) + + # Wait for rebalance to complete + g.log.info("Waiting for rebalance to complete") + ret = wait_for_rebalance_to_complete(self.mnode, self.volname) + self.assertTrue(ret, ("Rebalance is not yet complete " + "on the volume %s", self.volname)) + g.log.info("Rebalance is successfully complete on " + "the volume %s", self.volname) + + # Check Rebalance status after rebalance is complete + g.log.info("Checking Rebalance status") + ret, _, _ = rebalance_status(self.mnode, self.volname) + self.assertEqual(ret, 0, ("Failed to get rebalance status for " + "the volume %s", self.volname)) + g.log.info("Successfully got rebalance status of the " + "volume %s", self.volname) + + # Check the self-heal daemon process after adding bricks + g.log.info("Starting to get self-heal daemon process on " + "nodes 
%s", nodes) + glustershd_pids_after_expanding = {} + ret, pids = get_self_heal_daemon_pid(nodes) + self.assertTrue(ret, ("Either No self heal daemon process found or " + "more than One self heal daemon process found")) + g.log.info("Successfull in getting self-heal daemon process " + "on nodes %s", nodes) + + glustershd_pids_after_expanding = pids + g.log.info("Self Heal Daemon Process ID's afetr expanding " + "volume: %s", glustershd_pids_after_expanding) + + self.assertNotEqual(glustershd_pids, + glustershd_pids_after_expanding, + "Self Daemon process is same before and" + " after adding bricks") + g.log.info("Self Heal Daemon Process is different before and " + "after adding bricks") + + # get the bricks for the volume after expanding + bricks_list_after_expanding = get_all_bricks(self.mnode, self.volname) + g.log.info("Brick List after expanding " + "volume: %s", bricks_list_after_expanding) + + # validate the bricks present in volume info + # with glustershd server volume file after adding bricks + g.log.info("Starting parsing file %s", self.glustershd) + ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname, + bricks_list_after_expanding) + + self.assertTrue(ret, ("Brick List from volume info is different " + "from glustershd server volume file after " + "expanding bricks. Please check log file " + "for details")) + g.log.info("Successfully parsed %s file", self.glustershd) + + # shrink the volume + g.log.info("Starting volume shrink") + ret = shrink_volume(self.mnode, self.volname) + self.assertTrue(ret, ("Failed to shrink the volume on " + "volume %s", self.volname)) + g.log.info("Shrinking volume is successful on " + "volume %s", self.volname) + + # Log Volume Info and Status after shrinking the volume + g.log.info("Logging volume info and Status after shrinking volume") + ret = log_volume_info_and_status(self.mnode, self.volname) + self.assertTrue(ret, ("Logging volume info and status failed on " + "volume %s", self.volname)) + g.log.info("Successful in logging volume info and status " + "of volume %s", self.volname) + + # get the bricks after shrinking the volume + bricks_list_after_shrinking = get_all_bricks(self.mnode, self.volname) + g.log.info("Brick List after shrinking " + "volume: %s", bricks_list_after_shrinking) + + self.assertEqual(len(bricks_list_after_shrinking), len(bricks_list), + "Brick Count is mismatched after " + "shrinking the volume %s" % self.volname) + g.log.info("Brick Count matched before before expanding " + "and after shrinking volume") + + # Verfiy glustershd process releases its parent process + ret = is_shd_daemonized(nodes) + self.assertTrue(ret, ("Either No self heal daemon process found or " + "more than One self heal daemon process found")) + + # check the self-heal daemon process after removing bricks + g.log.info("Starting to get self-heal daemon process " + "on nodes %s", nodes) + glustershd_pids_after_shrinking = {} + ret, pids = get_self_heal_daemon_pid(nodes) + glustershd_pids_after_shrinking = pids + self.assertNotEqual(glustershd_pids_after_expanding, + glustershd_pids_after_shrinking, + "Self Heal Daemon process is same " + "after adding bricks and shrinking volume") + g.log.info("Self Heal Daemon Process is different after adding bricks " + "and shrinking volume") + + # validate bricks present in volume info + # with glustershd server volume file after removing bricks + g.log.info("Starting parsing file %s", self.glustershd) + ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname, + bricks_list_after_shrinking) + 
self.assertTrue(ret, ("Brick List from volume info is different " + "from glustershd server volume file after " + "removing bricks. Please check log file " + "for details")) + g.log.info("Successfully parsed %s file", self.glustershd) + + def test_glustershd_with_restarting_glusterd(self): + """ + Test Script to verify the self heal daemon process with restarting + glusterd and rebooting the server + + * stop all volumes + * restart glusterd - should not run self heal daemon process + * start replicated involved volumes + * single self heal daemon process running + * restart glusterd + * self heal daemon pid will change + * bring down brick and restart glusterd + * self heal daemon pid will change and its different from previous + * brought up the brick + + """ + # pylint: disable=too-many-statements + nodes = self.volume['servers'] + + # stop the volume + g.log.info("Stopping the volume %s", self.volname) + ret = volume_stop(self.mnode, self.volname) + self.assertTrue(ret, ("Failed to stop volume %s" % self.volname)) + g.log.info("Successfully stopped volume %s", self.volname) + + # check the self heal daemon process after stopping the volume + g.log.info("Verifying the self heal daemon process for " + "volume %s", self.volname) + ret = are_all_self_heal_daemons_are_online(self.mnode, self.volname) + self.assertFalse(ret, ("Self Heal Daemon process is still running " + "even after stopping volume %s" % self.volname)) + g.log.info("Self Heal Daemon is not running after stopping " + "volume %s", self.volname) + + # restart glusterd service on all the servers + g.log.info("Restarting glusterd on all servers %s", nodes) + ret = restart_glusterd(nodes) + self.assertTrue(ret, ("Failed to restart glusterd on all nodes %s", + nodes)) + g.log.info("Successfully restarted glusterd on all nodes %s", + nodes) + + # check the self heal daemon process after restarting glusterd process + g.log.info("Starting to get self-heal daemon process on" + " nodes %s", nodes) + ret = are_all_self_heal_daemons_are_online(self.mnode, self.volname) + self.assertFalse(ret, ("Self Heal Daemon process is running after " + "glusterd restart with volume %s in " + "stop state" % self.volname)) + g.log.info("Self Heal Daemon is not running after stopping " + "volume and restarting glusterd %s", self.volname) + + # start the volume + g.log.info("Starting the volume %s", self.volname) + ret = volume_start(self.mnode, self.volname) + self.assertTrue(ret, ("Failed to start volume %s" % self.volname)) + g.log.info("Volume %s started successfully", self.volname) + + # Verfiy glustershd process releases its parent process + g.log.info("Checking whether glustershd process is daemonized or not") + ret = is_shd_daemonized(nodes) + self.assertTrue(ret, ("Either No self heal daemon process found or " + "more than One self heal daemon process found")) + g.log.info("Single self heal daemon process on all nodes %s", nodes) + + # get the self heal daemon pids after starting volume + g.log.info("Starting to get self-heal daemon process " + "on nodes %s", nodes) + ret, pids = get_self_heal_daemon_pid(nodes) + self.assertTrue(ret, ("Either No self heal daemon process found or " + "more than One self heal daemon process found")) + g.log.info("Succesfull in getting self heal daemon pids") + glustershd_pids = pids + + # get the bricks for the volume + g.log.info("Fetching bricks for the volume : %s", self.volname) + bricks_list = get_all_bricks(self.mnode, self.volname) + g.log.info("Brick List : %s", bricks_list) + + # validate the bricks 
present in volume info + # with glustershd server volume file + g.log.info("Starting parsing file %s on " + "node %s", self.glustershd, self.mnode) + ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname, + bricks_list) + self.assertTrue(ret, ("Brick List from volume info is different from " + "glustershd server volume file. " + "Please check log file for details.")) + g.log.info("Successfully parsed %s file", self.glustershd) + + # restart glusterd service on all the servers + g.log.info("Restarting glusterd on all servers %s", nodes) + ret = restart_glusterd(nodes) + self.assertTrue(ret, ("Failed to restart glusterd on all nodes %s", + nodes)) + g.log.info("Successfully restarted glusterd on all nodes %s", + nodes) + + # Verify volume's all process are online for 60 sec + g.log.info("Verifying volume's all process are online") + ret = wait_for_volume_process_to_be_online(self.mnode, self.volname, + 60) + self.assertTrue(ret, ("Volume %s : All process are not " + "online", self.volname)) + g.log.info("Successfully Verified volume %s processes are online", + self.volname) + + # Verfiy glustershd process releases its parent process + ret = is_shd_daemonized(nodes) + self.assertTrue(ret, ("Either No self heal daemon process found or " + "more than One self heal daemon process found")) + + # check the self heal daemon process after starting volume and + # restarting glusterd process + g.log.info("Starting to get self-heal daemon process " + "on nodes %s", nodes) + ret, pids = get_self_heal_daemon_pid(nodes) + self.assertTrue(ret, ("Either No self heal daemon process found or " + "more than One self heal daemon process found")) + glustershd_pids_after_glusterd_restart = pids + + self.assertNotEqual(glustershd_pids, + glustershd_pids_after_glusterd_restart, + ("Self Heal Daemon pids are same after " + "restarting glusterd process")) + g.log.info("Self Heal Daemon process are different before and " + "after restarting glusterd process") + + # select bricks to bring offline + bricks_to_bring_offline_dict = (select_bricks_to_bring_offline( + self.mnode, self.volname)) + bricks_to_bring_offline = filter(None, ( + bricks_to_bring_offline_dict['hot_tier_bricks'] + + bricks_to_bring_offline_dict['cold_tier_bricks'] + + bricks_to_bring_offline_dict['volume_bricks'])) + + # bring bricks offline + g.log.info("Going to bring down the brick process " + "for %s", bricks_to_bring_offline) + ret = bring_bricks_offline(self.volname, bricks_to_bring_offline) + self.assertTrue(ret, ("Failed to bring down the bricks. 
Please "
+                              "check the log file for more details."))
+        g.log.info("Brought down the brick process "
+                   "for %s successfully", bricks_to_bring_offline)
+
+        # restart glusterd after bringing down the brick
+        g.log.info("Restart glusterd on all servers %s", nodes)
+        ret = restart_glusterd(nodes)
+        self.assertTrue(ret, ("Failed to restart glusterd on all nodes %s",
+                              nodes))
+        g.log.info("Successfully restarted glusterd on all nodes %s",
+                   nodes)
+
+        # Verify volume's all process are online for 60 sec
+        g.log.info("Verifying volume's all process are online")
+        ret = wait_for_volume_process_to_be_online(self.mnode, self.volname,
+                                                   60)
+        self.assertTrue(ret, ("Volume %s : All process are not "
+                              "online", self.volname))
+        g.log.info("Successfully Verified volume %s processes are online",
+                   self.volname)
+
+        # Verify glustershd process releases its parent process
+        ret = is_shd_daemonized(nodes)
+        self.assertTrue(ret, ("Either No self heal daemon process found or "
+                              "more than One self heal daemon process found"))
+
+        # check the self heal daemon process after killing brick and
+        # restarting glusterd process
+        g.log.info("Starting to get self-heal daemon process "
+                   "on nodes %s", nodes)
+        ret, pids = get_self_heal_daemon_pid(nodes)
+        self.assertTrue(ret, ("Either No self heal daemon process found or "
+                              "more than One self heal daemon process found"))
+        glustershd_pids_after_killing_brick = pids
+
+        self.assertNotEqual(glustershd_pids_after_glusterd_restart,
+                            glustershd_pids_after_killing_brick,
+                            ("Self Heal Daemon pids are same before and "
+                             "after killing the brick and restarting "
+                             "glusterd"))
+        g.log.info("Self Heal Daemon process is different after killing the "
+                   "brick and restarting glusterd")
+
+        # bring the bricks online
+        g.log.info("Bringing up the bricks %s online",
+                   bricks_to_bring_offline)
+        ret = bring_bricks_online(self.mnode, self.volname,
+                                  bricks_to_bring_offline)
+        self.assertTrue(ret, ("Failed to bring the bricks online"))
+        g.log.info("Successfully brought the bricks online")
+
+        # check all bricks are online
+        g.log.info("Verifying all bricks are online")
+        ret = are_bricks_online(self.mnode, self.volname,
+                                bricks_to_bring_offline)
+        self.assertTrue(ret, ("Not all bricks are online"))
+        g.log.info("All bricks are online.")
+
+
+@runs_on([['replicated', 'distributed-replicated'],
+          ['glusterfs', 'nfs', 'cifs']])
+class ImpactOfReplaceBrickForGlustershdTests(GlusterBaseClass):
+    """
+    ImpactOfReplaceBrickForGlustershdTests contains tests which verify
+    the impact of replace-brick on the self-heal daemon process
+    """
+
+    @classmethod
+    def setUpClass(cls):
+        # Calling GlusterBaseClass setUpClass
+        GlusterBaseClass.setUpClass.im_func(cls)
+
+        # Override Volumes
+        if cls.volume_type == "distributed-replicated":
+            # Define distributed-replicated volume
+            cls.volume['voltype'] = {
+                'type': 'distributed-replicated',
+                'dist_count': 2,
+                'replica_count': 3,
+                'arbiter_count': 1,
+                'transport': 'tcp'}
+
+        cls.glustershd = "/var/lib/glusterd/glustershd/glustershd-server.vol"
+
+    def setUp(self):
+        """
+        setUp method for every test
+        """
+
+        # calling GlusterBaseClass setUp
+        GlusterBaseClass.setUp.im_func(self)
+
+        self.all_mounts_procs = []
+        self.io_validation_complete = False
+
+        # Setup Volume and Mount Volume
+        g.log.info("Starting to Setup Volume %s", self.volname)
+        ret = self.setup_volume_and_mount_volume(self.mounts,
+                                                 volume_create_force=False)
+        if not ret:
+            raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
+        g.log.info("Successful in Setup Volume and Mount Volume")
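+
+    # Rationale for the pid comparison in the test below: replace-brick
+    # regenerates the glustershd server volfile and glusterd respawns the
+    # self-heal daemon with it, so each node's glustershd pid is expected
+    # to change across the operation.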
+ + def tearDown(self): + """ + If test method failed before validating IO, tearDown waits for the + IO's to complete and checks for the IO exit status + + Cleanup and umount volume + """ + # Cleanup and umount volume + g.log.info("Starting to Unmount Volume and Cleanup Volume") + ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts) + if not ret: + raise ExecutionError("Failed to umount the vol & cleanup Volume") + g.log.info("Successful in umounting the volume and Cleanup") + + # Calling GlusterBaseClass teardown + GlusterBaseClass.tearDown.im_func(self) + + def test_impact_of_replace_brick_for_glustershd(self): + nodes = self.volume['servers'] + + # check the self-heal daemon process + g.log.info("Starting to get self-heal daemon process on " + "nodes %s", nodes) + ret, pids = get_self_heal_daemon_pid(nodes) + self.assertTrue(ret, ("Either No self heal daemon process found or " + "more than One self heal daemon process " + "found : %s" % pids)) + g.log.info("Successful in getting Single self heal daemon process" + " on all nodes %s", nodes) + glustershd_pids = pids + + # get the bricks for the volume + g.log.info("Fetching bricks for the volume : %s", self.volname) + bricks_list = get_all_bricks(self.mnode, self.volname) + g.log.info("Brick List : %s", bricks_list) + + # validate the bricks present in volume info with + # glustershd server volume file + g.log.info("Starting parsing file %s on " + "node %s", self.glustershd, self.mnode) + ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname, + bricks_list) + self.assertTrue(ret, ("Brick List from volume info is different " + "from glustershd server volume file. " + "Please check log file for details")) + g.log.info("Successfully parsed %s file", self.glustershd) + + # replace brick + brick_to_replace = bricks_list[-1] + new_brick = brick_to_replace + 'new' + g.log.info("Replacing the brick %s for the volume : %s", + brick_to_replace, self.volname) + ret, _, err = replace_brick(self.mnode, self.volname, + brick_to_replace, new_brick) + self.assertFalse(ret, err) + g.log.info('Replaced brick %s to %s successfully', + brick_to_replace, new_brick) + + # check bricks + bricks_list = get_all_bricks(self.mnode, self.volname) + self.assertEqual(bricks_list[-1], new_brick, 'Replaced brick and ' + 'new brick are not equal') + + # Verify volume's all process are online for 60 sec + g.log.info("Verifying volume's all process are online") + ret = wait_for_volume_process_to_be_online(self.mnode, self.volname, + timeout=60) + self.assertTrue(ret, ("Volume %s : All process are not " + "online", self.volname)) + g.log.info("Successfully Verified volume %s processes are online", + self.volname) + + # Verify glustershd process releases its parent process + ret = is_shd_daemonized(nodes) + self.assertTrue(ret, ("Either No self heal daemon process found or " + "more than One self heal daemon process found")) + + # check the self-heal daemon process + g.log.info("Starting to get self-heal daemon process on " + "nodes %s", nodes) + ret, pids = get_self_heal_daemon_pid(nodes) + self.assertTrue(ret, ("Either No self heal daemon process found or " + "more than One self heal daemon process " + "found : %s" % pids)) + g.log.info("Successful in getting Single self heal daemon process" + " on all nodes %s", nodes) + glustershd_pids_after_replacement = pids + + # Compare pids before and after replacing + self.assertNotEqual(glustershd_pids, + glustershd_pids_after_replacement, + "Self Daemon process is same before and" + " after replacing bricks") 
+ g.log.info("Self Heal Daemon Process is different before and " + "after replacing bricks") + + # get the bricks for the volume after replacing + bricks_list_after_replacing = get_all_bricks(self.mnode, self.volname) + g.log.info("Brick List after expanding " + "volume: %s", bricks_list_after_replacing) + + # validate the bricks present in volume info + # with glustershd server volume file after replacing bricks + g.log.info("Starting parsing file %s", self.glustershd) + ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname, + bricks_list_after_replacing) + + self.assertTrue(ret, ("Brick List from volume info is different " + "from glustershd server volume file after " + "replacing bricks. Please check log file " + "for details")) + g.log.info("Successfully parsed %s file", self.glustershd) diff --git a/tests/functional/afr/test_client_side_quorum.py b/tests/functional/afr/test_client_side_quorum.py index 2512faee3..ba0aaa772 100644 --- a/tests/functional/afr/test_client_side_quorum.py +++ b/tests/functional/afr/test_client_side_quorum.py @@ -18,6 +18,7 @@ Test Cases in this module tests the client side quorum. """ +import tempfile from glusto.core import Glusto as g from glustolibs.gluster.exceptions import ExecutionError @@ -33,10 +34,8 @@ from glustolibs.gluster.brick_libs import (bring_bricks_offline, from glustolibs.io.utils import (validate_io_procs, is_io_procs_fail_with_rofs, list_all_files_and_dirs_mounts, - wait_for_io_to_complete - ) + wait_for_io_to_complete) from glustolibs.gluster.mount_ops import mount_volume, umount_volume -import tempfile @runs_on([['replicated', 'distributed-replicated'], @@ -74,7 +73,7 @@ class ClientSideQuorumTests(GlusterBaseClass): GlusterBaseClass.setUp.im_func(self) # Setup Volume and Mount Volume - g.log.info("Starting to Setup Volume %s" % self.volname) + g.log.info("Starting to Setup Volume %s", self.volname) ret = self.setup_volume_and_mount_volume(self.mounts) if not ret: raise ExecutionError("Failed to Setup_Volume and Mount_Volume") @@ -93,6 +92,7 @@ class ClientSideQuorumTests(GlusterBaseClass): g.log.info("Successful in Unmount Volume and Cleanup Volume") # Calling GlusterBaseClass tearDown + GlusterBaseClass.tearDown.im_func(self) def test_client_side_quorum_with_auto_option(self): @@ -105,19 +105,19 @@ class ClientSideQuorumTests(GlusterBaseClass): * perform ops """ + # pylint: disable=too-many-branches,too-many-statements # set cluster.quorum-type to auto options = {"cluster.quorum-type": "auto"} g.log.info("setting cluster.quorum-type to auto on " - "volume %s" % self.volname) + "volume %s", self.volname) ret = set_volume_options(self.mnode, self.volname, options) self.assertTrue(ret, ("Unable to set volume option %s for" "volume %s" % (options, self.volname))) - g.log.info("Sucessfully set %s for volume %s" - % (options, self.volname)) + g.log.info("Sucessfully set %s for volume %s", options, self.volname) # write files on all mounts g.log.info("Starting IO on all mounts...") - g.log.info("mounts: %s" % self.mounts) + g.log.info("mounts: %s", self.mounts) all_mounts_procs = [] for mount_obj in self.mounts: cmd = ("python %s create_files " @@ -134,28 +134,27 @@ class ClientSideQuorumTests(GlusterBaseClass): g.log.info("IO is successful on all mounts") # get the subvolumes - g.log.info("Starting to get sub-volumes for volume %s" % self.volname) + g.log.info("Starting to get sub-volumes for volume %s", self.volname) subvols_dict = get_subvols(self.mnode, self.volname) num_subvols = len(subvols_dict['volume_subvols']) - 
g.log.info("Number of subvolumes in volume %s:" % num_subvols) + g.log.info("Number of subvolumes in volume %s:", num_subvols) # bring bricks offline( 2 bricks ) for all the subvolumes for i in range(0, num_subvols): subvol_brick_list = subvols_dict['volume_subvols'][i] - g.log.info("sub-volume %s brick list : %s" - % (i, subvol_brick_list)) + g.log.info("sub-volume %s brick list : %s", i, subvol_brick_list) # For volume type: 1 * 2, bring 1 brick offline if len(subvol_brick_list) == 2: bricks_to_bring_offline = subvol_brick_list[0:1] else: bricks_to_bring_offline = subvol_brick_list[0:2] g.log.info("Going to bring down the brick process " - "for %s" % bricks_to_bring_offline) + "for %s", bricks_to_bring_offline) ret = bring_bricks_offline(self.volname, bricks_to_bring_offline) self.assertTrue(ret, ("Failed to bring down the bricks. Please " "check the log file for more details.")) g.log.info("Brought down the brick process " - "for %s succesfully" % bricks_to_bring_offline) + "for %s succesfully", bricks_to_bring_offline) # create 2 files named newfile0.txt and newfile1.txt g.log.info("Start creating 2 files on all mounts...") @@ -198,7 +197,7 @@ class ClientSideQuorumTests(GlusterBaseClass): for mount_obj in self.mounts: cmd = "ln %s/file0.txt %s/file0.txt_hwlink" \ % (mount_obj.mountpoint, mount_obj.mountpoint) - ret, out, err = g.run(mount_obj.client_system, cmd) + ret, _, err = g.run(mount_obj.client_system, cmd) self.assertTrue(ret, ("Unexpected error and creating hard link" " successful on read-only filesystem")) self.assertIn("Read-only file system", @@ -211,7 +210,7 @@ class ClientSideQuorumTests(GlusterBaseClass): for mount_obj in self.mounts: cmd = "ln -s %s/file1.txt %s/file1.txt_swlink" %\ (mount_obj.mountpoint, mount_obj.mountpoint) - ret, out, err = g.run(mount_obj.client_system, cmd) + ret, _, err = g.run(mount_obj.client_system, cmd) self.assertTrue(ret, ("Unexpected error and creating soft link" " successful on read-only filesystem")) self.assertIn("Read-only file system", @@ -224,7 +223,7 @@ class ClientSideQuorumTests(GlusterBaseClass): for mount_obj in self.mounts: cmd = "cat %s/file0.txt >> %s/file1.txt" %\ (mount_obj.mountpoint, mount_obj.mountpoint) - ret, out, err = g.run(mount_obj.client_system, cmd) + ret, _, err = g.run(mount_obj.client_system, cmd) self.assertTrue(ret, ("Unexpected error and append successful" " on read-only filesystem")) self.assertIn("Read-only file system", @@ -237,7 +236,7 @@ class ClientSideQuorumTests(GlusterBaseClass): for mount_obj in self.mounts: cmd = "echo 'Modify Contents' > %s/file1.txt"\ % (mount_obj.mountpoint) - ret, out, err = g.run(mount_obj.client_system, cmd) + ret, _, err = g.run(mount_obj.client_system, cmd) self.assertTrue(ret, ("Unexpected error and modifying successful" " on read-only filesystem")) self.assertIn("Read-only file system", @@ -249,7 +248,7 @@ class ClientSideQuorumTests(GlusterBaseClass): g.log.info("Truncating file1.txt on all mounts") for mount_obj in self.mounts: cmd = "truncate -s 0 %s/file1.txt" % (mount_obj.mountpoint) - ret, out, err = g.run(mount_obj.client_system, cmd) + ret, _, err = g.run(mount_obj.client_system, cmd) self.assertTrue(ret, ("Unexpected error and truncating file" " successful on read-only filesystem")) self.assertIn("Read-only file system", @@ -277,7 +276,7 @@ class ClientSideQuorumTests(GlusterBaseClass): g.log.info("stat on file1.txt on all mounts") for mount_obj in self.mounts: cmd = "stat %s/file1.txt" % (mount_obj.mountpoint) - ret, out, err = g.run(mount_obj.client_system, 
cmd) + ret, _, err = g.run(mount_obj.client_system, cmd) self.assertFalse(ret, ("Unexpected error and stat on file fails" " on read-only filesystem")) g.log.info("stat on file is successfull on read-only filesystem") @@ -287,7 +286,7 @@ class ClientSideQuorumTests(GlusterBaseClass): for mount_obj in self.mounts: cmd = ("python %s stat %s" % (self.script_upload_path, mount_obj.mountpoint)) - ret, out, err = g.run(mount_obj.client_system, cmd) + ret, _, err = g.run(mount_obj.client_system, cmd) self.assertFalse(ret, ("Unexpected error and stat on directory" " fails on read-only filesystem")) g.log.info("stat on dir is successfull on read-only filesystem") @@ -297,7 +296,7 @@ class ClientSideQuorumTests(GlusterBaseClass): for mount_obj in self.mounts: cmd = ("python %s ls %s" % (self.script_upload_path, mount_obj.mountpoint)) - ret, out, err = g.run(mount_obj.client_system, cmd) + ret, _, err = g.run(mount_obj.client_system, cmd) self.assertFalse(ret, ("Unexpected error and listing file fails" " on read-only filesystem")) g.log.info("listing files is successfull on read-only filesystem") @@ -316,33 +315,31 @@ class ClientSideQuorumTests(GlusterBaseClass): # set cluster.quorum-type to fixed options = {"cluster.quorum-type": "fixed"} - g.log.info("setting %s for the volume %s" % (options, self.volname)) + g.log.info("setting %s for the volume %s", options, self.volname) ret = set_volume_options(self.mnode, self.volname, options) self.assertTrue(ret, ("Unable to set %s for volume %s" % (options, self.volname))) - g.log.info("Successfully set %s for volume %s" - % (options, self.volname)) + g.log.info("Successfully set %s for volume %s", options, self.volname) # get the subvolumes - g.log.info("Starting to get sub-volumes for volume %s" % self.volname) + g.log.info("Starting to get sub-volumes for volume %s", self.volname) subvols_dict = get_subvols(self.mnode, self.volname) num_subvols = len(subvols_dict['volume_subvols']) - g.log.info("Number of subvolumes in volume %s is %s" - % (self.volname, num_subvols)) + g.log.info("Number of subvolumes in volume %s is %s", self.volname, + num_subvols) # get the number of bricks in replica set num_bricks_in_subvol = len(subvols_dict['volume_subvols'][0]) - g.log.info("Number of bricks in each replica set : %s" - % num_bricks_in_subvol) + g.log.info("Number of bricks in each replica set : %s", + num_bricks_in_subvol) # set cluster.quorum-count to higher value than the number of bricks in # repliac set start_range = num_bricks_in_subvol + 1 end_range = num_bricks_in_subvol + 30 for i in range(start_range, end_range): - options = {"cluster.quorum-count": "%s" % start_range} - g.log.info("setting %s for the volume %s" % - (options, self.volname)) + options = {"cluster.quorum-count": "%s" % i} + g.log.info("setting %s for the volume %s", options, self.volname) ret = set_volume_options(self.mnode, self.volname, options) self.assertFalse(ret, ("Able to set %s for volume %s, quorum-count" " should not be greater than number of" @@ -350,7 +347,7 @@ class ClientSideQuorumTests(GlusterBaseClass): % (options, self.volname))) g.log.info("Expected: Unable to set %s for volume %s, " "quorum-count should be less than number of bricks " - "in replica set" % (options, self.volname)) + "in replica set", options, self.volname) @runs_on([['distributed-replicated'], @@ -363,8 +360,8 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): GlusterBaseClass.setUpClass.im_func(cls) # Upload io scripts for running IO on mounts - g.log.info("Upload io scripts to clients %s for 
running IO on mounts" - % cls.clients) + g.log.info("Upload io scripts to clients %s for running IO on mounts", + cls.clients) script_local_path = ("/usr/share/glustolibs/io/scripts/" "file_dir_ops.py") cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/" @@ -373,23 +370,22 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): if not ret: raise ExecutionError("Failed to upload IO scripts to clients %s" % cls.clients) - g.log.info("Successfully uploaded IO scripts to clients %s" - % cls.clients) + g.log.info("Successfully uploaded IO scripts to clients %s", + cls.clients) cls.counter = 1 - """int: Value of counter is used for dirname-start-num argument for - file_dir_ops.py create_deep_dirs_with_files. - - The --dir-length argument value for - file_dir_ops.py create_deep_dirs_with_files is set to 10 - (refer to the cmd in setUp method). This means every mount will create - 10 top level dirs. For every mountpoint/testcase to create new set of - dirs, we are incrementing the counter by --dir-length value i.e 10 - in this test suite. - - If we are changing the --dir-length to new value, ensure the counter - is also incremented by same value to create new set of files/dirs. - """ + # int: Value of counter is used for dirname-start-num argument for + # file_dir_ops.py create_deep_dirs_with_files. + + # The --dir-length argument value for file_dir_ops.py + # create_deep_dirs_with_files is set to 10 (refer to the cmd in setUp + # method). This means every mount will create + # 10 top level dirs. For every mountpoint/testcase to create new set of + # dirs, we are incrementing the counter by --dir-length value i.e 10 in + # this test suite. + + # If we are changing the --dir-length to new value, ensure the counter + # is also incremented by same value to create new set of files/dirs. 
# Setup Volumes if cls.volume_type == "distributed-replicated": @@ -407,8 +403,7 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): {'name': 'testvol_%s_%d' % (cls.volume['voltype']['type'], i), 'servers': cls.servers, - 'voltype': cls.volume['voltype'] - }) + 'voltype': cls.volume['voltype']}) # Define two 2x3 distributed-replicated volumes for i in range(1, 3): @@ -422,8 +417,7 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): {'name': 'testvol_%s_%d' % (cls.volume['voltype']['type'], i+2), 'servers': cls.servers, - 'voltype': cls.volume['voltype'] - }) + 'voltype': cls.volume['voltype']}) # Define distributed volume cls.volume['voltype'] = { @@ -435,8 +429,7 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): {'name': 'testvol_%s' % cls.volume['voltype']['type'], 'servers': cls.servers, - 'voltype': cls.volume['voltype'] - }) + 'voltype': cls.volume['voltype']}) # Create and mount volumes cls.mount_points = [] @@ -450,7 +443,7 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): force=False) if not ret: raise ExecutionError("Failed to setup Volume" - " %s", volume_config['name']) + " %s" % volume_config['name']) g.log.info("Successful in setting volume %s", volume_config['name']) @@ -468,8 +461,8 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): raise ExecutionError( "Failed to do gluster mount on volume %s " % cls.volname) - g.log.info("Successfully mounted %s on client %s" - % (cls.volname, cls.client)) + g.log.info("Successfully mounted %s on client %s", + cls.volname, cls.client) def setUp(self): # Calling GlusterBaseClass setUp @@ -515,7 +508,7 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): ret = cleanup_volume(cls.mnode, volume) if not ret: raise ExecutionError("Failed to cleanup Volume %s" % volume) - g.log.info("Volume: %s cleanup is done" % volume) + g.log.info("Volume: %s cleanup is done", volume) g.log.info("Successfully Cleanedup all Volumes") # umount all volumes @@ -525,8 +518,8 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): raise ExecutionError( "Failed to umount on volume %s " % cls.volname) - g.log.info("Successfully umounted %s on client %s" - % (cls.volname, cls.client)) + g.log.info("Successfully umounted %s on client %s", cls.volname, + cls.client) # calling GlusterBaseClass tearDownClass GlusterBaseClass.tearDownClass.im_func(cls) @@ -545,12 +538,13 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): - bring down b0 on vol1 and b0 and b1 on vol3 - try to create files on all vols and check for result """ + # pylint: disable=too-many-locals,too-many-statements # Creating files for all volumes for mount_point in self.mount_points: self.all_mounts_procs = [] for mount_obj in self.mounts: - g.log.info("Generating data for %s:%s" - % (mount_obj.client_system, mount_point)) + g.log.info("Generating data for %s:%s", + mount_obj.client_system, mount_point) # Create files g.log.info('Creating files...') command = ("python %s create_files -f 50 " @@ -576,19 +570,17 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): % vol_number) options = {"cluster.quorum-type": "auto"} g.log.info("setting cluster.quorum-type to auto on " - "volume testvol_distributed-replicated_%s" - % vol_number) + "volume testvol_distributed-replicated_%s", vol_number) ret = set_volume_options(self.mnode, vol_name, options) self.assertTrue(ret, ("Unable to set volume option %s for " "volume %s" % (options, vol_name))) - g.log.info("Sucessfully set %s for volume %s" - % (options, vol_name)) + 
g.log.info("Sucessfully set %s for volume %s", options, vol_name) # check is options are set correctly volume_list = get_volume_list(self.mnode) for volume in volume_list: - g.log.info('Checking for cluster.quorum-type option for %s' - % volume) + g.log.info('Checking for cluster.quorum-type option for %s', + volume) volume_options_dict = get_volume_options(self.mnode, volume, 'cluster.quorum-type') @@ -599,16 +591,14 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): 'Option cluster.quorum-type ' 'is not AUTO for %s' % volume) - g.log.info('Option cluster.quorum-type is AUTO for %s' - % volume) + g.log.info('Option cluster.quorum-type is AUTO for %s', volume) else: self.assertEqual(volume_options_dict['cluster.quorum-type'], 'none', 'Option cluster.quorum-type ' 'is not NONE for %s' % volume) - g.log.info('Option cluster.quorum-type is NONE for %s' - % volume) + g.log.info('Option cluster.quorum-type is NONE for %s', volume) # Get first brick server and brick path # and get first file from filelist then delete it from volume @@ -616,11 +606,11 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): for volume in volume_list: brick_list = get_all_bricks(self.mnode, volume) brick_server, brick_path = brick_list[0].split(':') - ret, file_list, err = g.run(brick_server, 'ls %s' % brick_path) + ret, file_list, _ = g.run(brick_server, 'ls %s' % brick_path) self.assertFalse(ret, 'Failed to ls files on %s' % brick_server) file_from_vol = file_list.splitlines()[0] - ret, out, err = g.run(brick_server, 'rm -rf %s/%s' - % (brick_path, file_from_vol)) + ret, _, _ = g.run(brick_server, 'rm -rf %s/%s' + % (brick_path, file_from_vol)) self.assertFalse(ret, 'Failed to rm file on %s' % brick_server) vols_file_list[volume] = file_from_vol @@ -629,7 +619,7 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): volname = 'testvol_distributed-replicated_1' brick_list = get_all_bricks(self.mnode, volname) bricks_to_bring_offline = brick_list[0:1] - g.log.info('Bringing bricks %s offline...' % bricks_to_bring_offline) + g.log.info('Bringing bricks %s offline...', bricks_to_bring_offline) ret = bring_bricks_offline(volname, bricks_to_bring_offline) self.assertTrue(ret, 'Failed to bring bricks %s offline' % bricks_to_bring_offline) @@ -638,14 +628,14 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): bricks_to_bring_offline) self.assertTrue(ret, 'Bricks %s are not offline' % bricks_to_bring_offline) - g.log.info('Bringing bricks %s offline is successful' - % bricks_to_bring_offline) + g.log.info('Bringing bricks %s offline is successful', + bricks_to_bring_offline) # bring first two bricks for testvol_distributed-replicated_3 volname = 'testvol_distributed-replicated_3' brick_list = get_all_bricks(self.mnode, volname) bricks_to_bring_offline = brick_list[0:2] - g.log.info('Bringing bricks %s offline...' 
% bricks_to_bring_offline) + g.log.info('Bringing bricks %s offline...', bricks_to_bring_offline) ret = bring_bricks_offline(volname, bricks_to_bring_offline) self.assertTrue(ret, 'Failed to bring bricks %s offline' % bricks_to_bring_offline) @@ -654,8 +644,8 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): bricks_to_bring_offline) self.assertTrue(ret, 'Bricks %s are not offline' % bricks_to_bring_offline) - g.log.info('Bringing bricks %s offline is successful' - % bricks_to_bring_offline) + g.log.info('Bringing bricks %s offline is successful', + bricks_to_bring_offline) # merge two dicts (volname: file_to_delete) and (volname: mountpoint) temp_dict = [vols_file_list, self.mount_points_and_volnames] diff --git a/tests/functional/afr/test_heal_info_while_accessing_file.py b/tests/functional/afr/test_heal_info_while_accessing_file.py deleted file mode 100644 index 316880318..000000000 --- a/tests/functional/afr/test_heal_info_while_accessing_file.py +++ /dev/null @@ -1,230 +0,0 @@ -# Copyright (C) 2015-2016 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -from glusto.core import Glusto as g -from glustolibs.gluster.gluster_base_class import (GlusterBaseClass, runs_on) -from glustolibs.gluster.exceptions import ExecutionError -from glustolibs.gluster.volume_libs import get_subvols -from glustolibs.gluster.brick_libs import (bring_bricks_offline, - are_bricks_offline, - get_all_bricks) - -from glustolibs.gluster.heal_ops import get_heal_info_summary -from glustolibs.misc.misc_libs import upload_scripts -from glustolibs.io.utils import (validate_io_procs, - list_all_files_and_dirs_mounts, - wait_for_io_to_complete) - - -@runs_on([['distributed-replicated'], - ['glusterfs']]) -class TestSelfHeal(GlusterBaseClass): - """ - Description: - Test cases related to - healing in default configuration of the volume - """ - - @classmethod - def setUpClass(cls): - # Calling GlusterBaseClass setUpClass - GlusterBaseClass.setUpClass.im_func(cls) - - # Upload io scripts for running IO on mounts - g.log.info("Upload io scripts to clients %s for running IO on mounts" - % cls.clients) - script_local_path = ("/usr/share/glustolibs/io/scripts/" - "file_dir_ops.py") - cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/" - "file_dir_ops.py") - ret = upload_scripts(cls.clients, [script_local_path]) - if not ret: - raise ExecutionError("Failed to upload IO scripts to clients %s" - % cls.clients) - g.log.info("Successfully uploaded IO scripts to clients %s" - % cls.clients) - - cls.counter = 1 - """int: Value of counter is used for dirname-start-num argument for - file_dir_ops.py create_deep_dirs_with_files. - - The --dir-length argument value for - file_dir_ops.py create_deep_dirs_with_files is set to 10 - (refer to the cmd in setUp method). This means every mount will create - 10 top level dirs. 
For every mountpoint/testcase to create new set of - dirs, we are incrementing the counter by --dir-length value i.e 10 - in this test suite. - - If we are changing the --dir-length to new value, ensure the counter - is also incremented by same value to create new set of files/dirs. - """ - - def setUp(self): - # Calling GlusterBaseClass setUp - GlusterBaseClass.setUp.im_func(self) - - self.all_mounts_procs = [] - self.io_validation_complete = False - - # Setup Volume and Mount Volume - g.log.info("Starting to Setup Volume and Mount Volume") - ret = self.setup_volume_and_mount_volume(mounts=self.mounts, - volume_create_force=False) - if not ret: - raise ExecutionError("Failed to Setup_Volume and Mount_Volume") - g.log.info("Successful in Setup Volume and Mount Volume") - - self.bricks_list = get_all_bricks(self.mnode, self.volname) - - def tearDown(self): - """ - If test method failed before validating IO, tearDown waits for the - IO's to complete and checks for the IO exit status - - Cleanup and umount volume - """ - if not self.io_validation_complete: - g.log.info("Wait for IO to complete as IO validation did not " - "succeed in test method") - ret = wait_for_io_to_complete(self.all_mounts_procs, self.mounts) - if not ret: - raise ExecutionError("IO failed on some of the clients") - g.log.info("IO is successful on all mounts") - - # List all files and dirs created - g.log.info("List all files and directories:") - ret = list_all_files_and_dirs_mounts(self.mounts) - if not ret: - raise ExecutionError("Failed to list all files and dirs") - g.log.info("Listing all files and directories is successful") - - # Cleanup and umount volume - g.log.info("Starting to Unmount Volume and Cleanup Volume") - ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts) - if not ret: - raise ExecutionError("Failed to umount the vol & cleanup Volume") - g.log.info("Successful in umounting the volume and Cleanup") - - # Calling GlusterBaseClass teardown - GlusterBaseClass.tearDown.im_func(self) - - def test_heal_info_shouldnot_list_files_being_accessed(self): - """ - - bring brick 1 offline - - create files and validate IO - - get entries before accessing file - - get first filename from active subvol without offline bricks - - access and modify the file - - while accessing - get entries - - Compare entries before accessing and while accessing - - validate IO - """ - - # Bring 1-st brick offline - brick_to_bring_offline = [self.bricks_list[0]] - g.log.info('Bringing bricks %s offline...' 
% brick_to_bring_offline) - ret = bring_bricks_offline(self.volname, brick_to_bring_offline) - self.assertTrue(ret, 'Failed to bring bricks %s offline' - % brick_to_bring_offline) - - ret = are_bricks_offline(self.mnode, self.volname, - brick_to_bring_offline) - self.assertTrue(ret, 'Bricks %s are not offline' - % brick_to_bring_offline) - g.log.info('Bringing bricks %s offline is successful' - % brick_to_bring_offline) - - # Creating files on client side - for mount_obj in self.mounts: - g.log.info("Generating data for %s:%s" - % (mount_obj.client_system, mount_obj.mountpoint)) - - # Creating files - cmd = ("python %s create_files -f 100 %s" - % (self.script_upload_path, mount_obj.mountpoint)) - - proc = g.run_async(mount_obj.client_system, cmd, - user=mount_obj.user) - self.all_mounts_procs.append(proc) - - # Validate IO - g.log.info("Wait for IO to complete and validate IO ...") - ret = validate_io_procs(self.all_mounts_procs, self.mounts) - self.io_validation_complete = True - self.assertTrue(ret, "IO failed on some of the clients") - g.log.info("IO is successful on all mounts") - - # Get entries before accessing file - g.log.info("Getting entries_before_accessing file...") - entries_before_accessing = get_heal_info_summary( - self.mnode, self.volname) - self.assertNotEqual(entries_before_accessing, None, - 'Can`t get heal info summary') - g.log.info( - "Getting entries_before_accessing file finished successfully") - - # Get filename to access from active subvol without offline bricks - # Get last subvol - subvols = get_subvols(self.mnode, self.volname) - subvol_without_offline_brick = subvols['volume_subvols'][-1] - - # Get first brick server and brick path - # and get first file from filelist - subvol_mnode, mnode_brick = subvol_without_offline_brick[0].split(':') - ret, file_list, err = g.run(subvol_mnode, 'ls %s' % mnode_brick) - file_to_edit = file_list.splitlines()[0] - - # Access and modify the file - g.log.info("Start modifying IO on all mounts...") - self.all_mounts_procs = [] - for mount_obj in self.mounts: - g.log.info("Modifying IO on %s:%s", mount_obj.client_system, - mount_obj.mountpoint) - - cmd = ("cd %s/ ; " - "dd if=/dev/zero of=%s bs=1G count=1" - % (mount_obj.mountpoint, file_to_edit)) - proc = g.run_async(mount_obj.client_system, cmd, - user=mount_obj.user) - self.all_mounts_procs.append(proc) - g.log.info("IO on %s:%s is modified successfully" - % (mount_obj.client_system, - mount_obj.mountpoint)) - self.io_validation_complete = False - - # Get entries while accessing file - g.log.info("Getting entries while accessing file...") - entries_while_accessing = get_heal_info_summary( - self.mnode, self.volname) - self.assertNotEqual(entries_before_accessing, None, - 'Can`t get heal info summary') - g.log.info("Getting entries while accessing file " - "finished successfully") - - # Compare dicts before accessing and while accessing - g.log.info('Comparing entries before modifying and while modifying...') - ret = cmp(entries_before_accessing, entries_while_accessing) - self.assertEqual(ret, 0, 'Entries before modifying and while modifying' - 'are not equal') - g.log.info('Comparison entries before modifying and while modifying' - 'finished successfully.') - - # Validate IO - g.log.info("Wait for IO to complete and validate IO ...") - ret = validate_io_procs(self.all_mounts_procs, self.mounts) - self.assertTrue(ret, "IO failed on some of the clients") - self.io_validation_complete = True - g.log.info("IO is successful on all mounts") diff --git 
a/tests/functional/afr/test_self_heal_daemon_process.py b/tests/functional/afr/test_self_heal_daemon_process.py deleted file mode 100644 index f3c416687..000000000 --- a/tests/functional/afr/test_self_heal_daemon_process.py +++ /dev/null @@ -1,663 +0,0 @@ -# Copyright (C) 2016-2017 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -""" Description: - Test Cases in this module tests the self heal daemon process. -""" - -from glusto.core import Glusto as g -from glustolibs.gluster.exceptions import ExecutionError -from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on -from glustolibs.gluster.volume_libs import ( - expand_volume, shrink_volume, log_volume_info_and_status, - wait_for_volume_process_to_be_online) -from glustolibs.gluster.rebalance_ops import (rebalance_start, - wait_for_rebalance_to_complete, - rebalance_status) -from glustolibs.gluster.brick_libs import (get_all_bricks, - bring_bricks_offline, - bring_bricks_online, - are_bricks_online, - select_bricks_to_bring_offline) -from glustolibs.gluster.brick_ops import replace_brick -from glustolibs.gluster.heal_libs import (get_self_heal_daemon_pid, - do_bricks_exist_in_shd_volfile, - is_shd_daemonized, - are_all_self_heal_daemons_are_online) -from glustolibs.gluster.volume_ops import (volume_stop, volume_start) -from glustolibs.gluster.gluster_init import restart_glusterd - - -@runs_on([['replicated', 'distributed-replicated', 'dispersed', - 'distributed-dispersed'], ['glusterfs', 'nfs', 'cifs']]) -class SelfHealDaemonProcessTests(GlusterBaseClass): - """ - SelfHealDaemonProcessTests contains tests which verifies the - self-heal daemon process of the nodes - """ - @classmethod - def setUpClass(cls): - """ - setup volume, mount volume and initialize necessary variables - which is used in tests - """ - - # calling GlusterBaseClass setUpClass - GlusterBaseClass.setUpClass.im_func(cls) - - # Setup Volume and Mount Volume - g.log.info("Starting to Setup Volume and Mount Volume") - ret = cls.setup_volume_and_mount_volume(mounts=cls.mounts) - if not ret: - raise ExecutionError("Failed to Setup_Volume and Mount_Volume") - g.log.info("Successful in Setup Volume and Mount Volume") - - # Verfiy glustershd process releases its parent process - ret = is_shd_daemonized(cls.servers) - if not ret: - raise ExecutionError("Self Heal Daemon process was still" - " holding parent process.") - g.log.info("Self Heal Daemon processes are online") - - cls.GLUSTERSHD = "/var/lib/glusterd/glustershd/glustershd-server.vol" - - def setUp(self): - """ - setUp method for every test - """ - - # calling GlusterBaseClass setUp - GlusterBaseClass.setUp.im_func(self) - - def tearDown(self): - """ - tearDown for every test - """ - - # Calling GlusterBaseClass tearDown - GlusterBaseClass.tearDown.im_func(self) - - @classmethod - def tearDownClass(cls): - """ - Clean up the volume and umount 
volume from client - """ - - # stopping the volume - g.log.info("Starting to Unmount Volume and Cleanup Volume") - ret = cls.unmount_volume_and_cleanup_volume(mounts=cls.mounts) - if not ret: - raise ExecutionError("Failed to Unmount Volume and Cleanup Volume") - g.log.info("Successful in Unmount Volume and Cleanup Volume") - - # calling GlusterBaseClass tearDownClass - GlusterBaseClass.tearDownClass.im_func(cls) - - def test_glustershd_with_add_remove_brick(self): - """ - Test script to verify glustershd process with adding and - removing bricks - - * check glustershd process - only 1 glustershd process should - be running - * bricks must be present in glustershd-server.vol file for - the replicated involved volumes - * Add bricks - * check glustershd process - only 1 glustershd process should - be running and its should be different from previous one - * bricks which are added must present in glustershd-server.vol file - * remove bricks - * check glustershd process - only 1 glustershd process should - be running and its different from previous one - * bricks which are removed should not present - in glustershd-server.vol file - - """ - - nodes = self.volume['servers'] - bricks_list = [] - glustershd_pids = {} - - # check the self-heal daemon process - g.log.info("Starting to get self-heal daemon process on " - "nodes %s" % nodes) - ret, pids = get_self_heal_daemon_pid(nodes) - self.assertTrue(ret, ("Either No self heal daemon process found or " - "more than One self heal daemon process " - "found : %s" % pids)) - g.log.info("Successful in getting Single self heal daemon process" - " on all nodes %s", nodes) - glustershd_pids = pids - - # get the bricks for the volume - g.log.info("Fetching bricks for the volume : %s" % self.volname) - bricks_list = get_all_bricks(self.mnode, self.volname) - g.log.info("Brick List : %s" % bricks_list) - - # validate the bricks present in volume info with - # glustershd server volume file - g.log.info("Starting parsing file %s on " - "node %s" % (self.GLUSTERSHD, self.mnode)) - ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname, - bricks_list) - self.assertTrue(ret, ("Brick List from volume info is different " - "from glustershd server volume file. 
" - "Please check log file for details")) - g.log.info("Successfully parsed %s file" % self.GLUSTERSHD) - - # expanding volume - g.log.info("Start adding bricks to volume %s" % self.volname) - ret = expand_volume(self.mnode, self.volname, self.servers, - self.all_servers_info) - self.assertTrue(ret, ("Failed to add bricks to " - "volume %s " % self.volname)) - g.log.info("Add brick successfull") - - # Log Volume Info and Status after expanding the volume - g.log.info("Logging volume info and Status after expanding volume") - ret = log_volume_info_and_status(self.mnode, self.volname) - self.assertTrue(ret, ("Logging volume info and status failed " - "on volume %s", self.volname)) - g.log.info("Successful in logging volume info and status " - "of volume %s", self.volname) - - # Verify volume's all process are online for 60 sec - g.log.info("Verifying volume's all process are online") - ret = wait_for_volume_process_to_be_online(self.mnode, self.volname, - 60) - self.assertTrue(ret, ("Volume %s : All process are not " - "online", self.volname)) - g.log.info("Successfully Verified volume %s processes are online", - self.volname) - - # Start Rebalance - g.log.info("Starting Rebalance on the volume") - ret, out, err = rebalance_start(self.mnode, self.volname) - self.assertEqual(ret, 0, ("Failed to start rebalance on " - "the volume %s with error %s" % - (self.volname, err))) - g.log.info("Successfully started rebalance on the " - "volume %s", self.volname) - - # Log Rebalance status - g.log.info("Log Rebalance status") - _, _, _ = rebalance_status(self.mnode, self.volname) - - # Wait for rebalance to complete - g.log.info("Waiting for rebalance to complete") - ret = wait_for_rebalance_to_complete(self.mnode, self.volname) - self.assertTrue(ret, ("Rebalance is not yet complete " - "on the volume %s", self.volname)) - g.log.info("Rebalance is successfully complete on " - "the volume %s", self.volname) - - # Check Rebalance status after rebalance is complete - g.log.info("Checking Rebalance status") - ret, _, _ = rebalance_status(self.mnode, self.volname) - self.assertEqual(ret, 0, ("Failed to get rebalance status for " - "the volume %s", self.volname)) - g.log.info("Successfully got rebalance status of the " - "volume %s", self.volname) - - # Check the self-heal daemon process after adding bricks - g.log.info("Starting to get self-heal daemon process on " - "nodes %s" % nodes) - glustershd_pids_after_expanding = {} - ret, pids = get_self_heal_daemon_pid(nodes) - self.assertTrue(ret, ("Either No self heal daemon process found or " - "more than One self heal daemon process found")) - g.log.info("Successfull in getting self-heal daemon process " - "on nodes %s" % nodes) - - glustershd_pids_after_expanding = pids - g.log.info("Self Heal Daemon Process ID's afetr expanding " - "volume: %s" % glustershd_pids_after_expanding) - - self.assertNotEqual(glustershd_pids, - glustershd_pids_after_expanding, - "Self Daemon process is same before and" - " after adding bricks") - g.log.info("Self Heal Daemon Process is different before and " - "after adding bricks") - - # get the bricks for the volume after expanding - bricks_list_after_expanding = get_all_bricks(self.mnode, self.volname) - g.log.info("Brick List after expanding " - "volume: %s" % bricks_list_after_expanding) - - # validate the bricks present in volume info - # with glustershd server volume file after adding bricks - g.log.info("Starting parsing file %s" % self.GLUSTERSHD) - ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname, - 
bricks_list_after_expanding) - - self.assertTrue(ret, ("Brick List from volume info is different " - "from glustershd server volume file after " - "expanding bricks. Please check log file " - "for details")) - g.log.info("Successfully parsed %s file" % self.GLUSTERSHD) - - # shrink the volume - g.log.info("Starting volume shrink") - ret = shrink_volume(self.mnode, self.volname) - self.assertTrue(ret, ("Failed to shrink the volume on " - "volume %s", self.volname)) - g.log.info("Shrinking volume is successful on " - "volume %s", self.volname) - - # Log Volume Info and Status after shrinking the volume - g.log.info("Logging volume info and Status after shrinking volume") - ret = log_volume_info_and_status(self.mnode, self.volname) - self.assertTrue(ret, ("Logging volume info and status failed on " - "volume %s", self.volname)) - g.log.info("Successful in logging volume info and status " - "of volume %s", self.volname) - - # get the bricks after shrinking the volume - bricks_list_after_shrinking = get_all_bricks(self.mnode, self.volname) - g.log.info("Brick List after shrinking " - "volume: %s" % bricks_list_after_shrinking) - - self.assertEqual(len(bricks_list_after_shrinking), len(bricks_list), - "Brick Count is mismatched after " - "shrinking the volume %s" % self.volname) - g.log.info("Brick Count matched before before expanding " - "and after shrinking volume") - - # Verfiy glustershd process releases its parent process - ret = is_shd_daemonized(nodes) - self.assertTrue(ret, ("Either No self heal daemon process found or " - "more than One self heal daemon process found")) - - # check the self-heal daemon process after removing bricks - g.log.info("Starting to get self-heal daemon process " - "on nodes %s" % nodes) - glustershd_pids_after_shrinking = {} - ret, pids = get_self_heal_daemon_pid(nodes) - glustershd_pids_after_shrinking = pids - self.assertNotEqual(glustershd_pids_after_expanding, - glustershd_pids_after_shrinking, - "Self Heal Daemon process is same " - "after adding bricks and shrinking volume") - g.log.info("Self Heal Daemon Process is different after adding bricks " - "and shrinking volume") - - # validate bricks present in volume info - # with glustershd server volume file after removing bricks - g.log.info("Starting parsing file %s" % self.GLUSTERSHD) - ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname, - bricks_list_after_shrinking) - self.assertTrue(ret, ("Brick List from volume info is different " - "from glustershd server volume file after " - "removing bricks. 
Please check log file " - "for details")) - g.log.info("Successfully parsed %s file" % self.GLUSTERSHD) - - def test_glustershd_with_restarting_glusterd(self): - """ - Test Script to verify the self heal daemon process with restarting - glusterd and rebooting the server - - * stop all volumes - * restart glusterd - should not run self heal daemon process - * start replicated involved volumes - * single self heal daemon process running - * restart glusterd - * self heal daemon pid will change - * bring down brick and restart glusterd - * self heal daemon pid will change and its different from previous - * brought up the brick - - """ - - nodes = self.volume['servers'] - - # stop the volume - g.log.info("Stopping the volume %s" % self.volname) - ret = volume_stop(self.mnode, self.volname) - self.assertTrue(ret, ("Failed to stop volume %s" % self.volname)) - g.log.info("Successfully stopped volume %s" % self.volname) - - # check the self heal daemon process after stopping the volume - g.log.info("Verifying the self heal daemon process for " - "volume %s" % self.volname) - ret = are_all_self_heal_daemons_are_online(self.mnode, self.volname) - self.assertFalse(ret, ("Self Heal Daemon process is still running " - "even after stopping volume %s" % self.volname)) - g.log.info("Self Heal Daemon is not running after stopping " - "volume %s" % self.volname) - - # restart glusterd service on all the servers - g.log.info("Restarting glusterd on all servers %s", nodes) - ret = restart_glusterd(nodes) - self.assertTrue(ret, ("Failed to restart glusterd on all nodes %s", - nodes)) - g.log.info("Successfully restarted glusterd on all nodes %s", - nodes) - - # check the self heal daemon process after restarting glusterd process - g.log.info("Starting to get self-heal daemon process on" - " nodes %s" % nodes) - ret = are_all_self_heal_daemons_are_online(self.mnode, self.volname) - self.assertFalse(ret, ("Self Heal Daemon process is running after " - "glusterd restart with volume %s in " - "stop state" % self.volname)) - g.log.info("Self Heal Daemon is not running after stopping " - "volume and restarting glusterd %s" % self.volname) - - # start the volume - g.log.info("Starting the volume %s" % self.volname) - ret = volume_start(self.mnode, self.volname) - self.assertTrue(ret, ("Failed to start volume %s" % self.volname)) - g.log.info("Volume %s started successfully" % self.volname) - - # Verfiy glustershd process releases its parent process - g.log.info("Checking whether glustershd process is daemonized or not") - ret = is_shd_daemonized(nodes) - self.assertTrue(ret, ("Either No self heal daemon process found or " - "more than One self heal daemon process found")) - g.log.info("Single self heal daemon process on all nodes %s" % nodes) - - # get the self heal daemon pids after starting volume - g.log.info("Starting to get self-heal daemon process " - "on nodes %s" % nodes) - ret, pids = get_self_heal_daemon_pid(nodes) - self.assertTrue(ret, ("Either No self heal daemon process found or " - "more than One self heal daemon process found")) - g.log.info("Succesfull in getting self heal daemon pids") - glustershd_pids = pids - - # get the bricks for the volume - g.log.info("Fetching bricks for the volume : %s" % self.volname) - bricks_list = get_all_bricks(self.mnode, self.volname) - g.log.info("Brick List : %s" % bricks_list) - - # validate the bricks present in volume info - # with glustershd server volume file - g.log.info("Starting parsing file %s on " - "node %s" % (self.GLUSTERSHD, self.mnode)) - ret = 
do_bricks_exist_in_shd_volfile(self.mnode, self.volname, - bricks_list) - self.assertTrue(ret, ("Brick List from volume info is different from " - "glustershd server volume file. " - "Please check log file for details.")) - g.log.info("Successfully parsed %s file" % self.GLUSTERSHD) - - # restart glusterd service on all the servers - g.log.info("Restarting glusterd on all servers %s", nodes) - ret = restart_glusterd(nodes) - self.assertTrue(ret, ("Failed to restart glusterd on all nodes %s", - nodes)) - g.log.info("Successfully restarted glusterd on all nodes %s", - nodes) - - # Verify volume's all process are online for 60 sec - g.log.info("Verifying volume's all process are online") - ret = wait_for_volume_process_to_be_online(self.mnode, self.volname, - 60) - self.assertTrue(ret, ("Volume %s : All process are not " - "online", self.volname)) - g.log.info("Successfully Verified volume %s processes are online", - self.volname) - - # Verfiy glustershd process releases its parent process - ret = is_shd_daemonized(nodes) - self.assertTrue(ret, ("Either No self heal daemon process found or " - "more than One self heal daemon process found")) - - # check the self heal daemon process after starting volume and - # restarting glusterd process - g.log.info("Starting to get self-heal daemon process " - "on nodes %s" % nodes) - ret, pids = get_self_heal_daemon_pid(nodes) - self.assertTrue(ret, ("Either No self heal daemon process found or " - "more than One self heal daemon process found")) - glustershd_pids_after_glusterd_restart = pids - - self.assertNotEqual(glustershd_pids, - glustershd_pids_after_glusterd_restart, - ("Self Heal Daemon pids are same after " - "restarting glusterd process")) - g.log.info("Self Heal Daemon process are different before and " - "after restarting glusterd process") - - # select bricks to bring offline - bricks_to_bring_offline_dict = (select_bricks_to_bring_offline( - self.mnode, self.volname)) - bricks_to_bring_offline = filter(None, ( - bricks_to_bring_offline_dict['hot_tier_bricks'] + - bricks_to_bring_offline_dict['cold_tier_bricks'] + - bricks_to_bring_offline_dict['volume_bricks'])) - - # bring bricks offline - g.log.info("Going to bring down the brick process " - "for %s" % bricks_to_bring_offline) - ret = bring_bricks_offline(self.volname, bricks_to_bring_offline) - self.assertTrue(ret, ("Failed to bring down the bricks. 
Please " - "check the log file for more details.")) - g.log.info("Brought down the brick process " - "for %s succesfully" % bricks_to_bring_offline) - - # restart glusterd after brought down the brick - g.log.info("Restart glusterd on all servers %s", nodes) - ret = restart_glusterd(nodes) - self.assertTrue(ret, ("Failed to restart glusterd on all nodes %s", - nodes)) - g.log.info("Successfully restarted glusterd on all nodes %s", - nodes) - - # Verify volume's all process are online for 60 sec - g.log.info("Verifying volume's all process are online") - ret = wait_for_volume_process_to_be_online(self.mnode, self.volname, - 60) - self.assertTrue(ret, ("Volume %s : All process are not " - "online", self.volname)) - g.log.info("Successfully Verified volume %s processes are online", - self.volname) - - # Verfiy glustershd process releases its parent process - ret = is_shd_daemonized(nodes) - self.assertTrue(ret, ("Either No self heal daemon process found or " - "more than One self heal daemon process found")) - - # check the self heal daemon process after killing brick and - # restarting glusterd process - g.log.info("Starting to get self-heal daemon process " - "on nodes %s" % nodes) - ret, pids = get_self_heal_daemon_pid(nodes) - self.assertTrue(ret, ("Either No self heal daemon process found or " - "more than One self heal daemon process found")) - glustershd_pids_after_killing_brick = pids - - self.assertNotEqual(glustershd_pids_after_glusterd_restart, - glustershd_pids_after_killing_brick, - ("Self Heal Daemon process are same from before " - "killing the brick,restarting glusterd process")) - g.log.info("Self Heal Daemon process are different after killing the " - "brick, restarting the glusterd process") - - # brought the brick online - g.log.info("bringing up the bricks : %s online" % - bricks_to_bring_offline) - ret = bring_bricks_online(self.mnode, self.volname, - bricks_to_bring_offline) - self.assertTrue(ret, ("Failed to brought the bricks online")) - g.log.info("Successfully brought the bricks online") - - # check all bricks are online - g.log.info("Verifying all bricka are online or not.....") - ret = are_bricks_online(self.mnode, self.volname, - bricks_to_bring_offline) - self.assertTrue(ret, ("Not all bricks are online")) - g.log.info("All bricks are online.") - - -@runs_on([['replicated', 'distributed-replicated'], - ['glusterfs', 'nfs', 'cifs']]) -class ImpactOfReplaceBrickForGlustershdTests(GlusterBaseClass): - """ - ClientSideQuorumTests contains tests which verifies the - client side quorum Test Cases - """ - - @classmethod - def setUpClass(cls): - # Calling GlusterBaseClass setUpClass - GlusterBaseClass.setUpClass.im_func(cls) - - # Override Volumes - if cls.volume_type == "distributed-replicated": - # Define distributed-replicated volume - cls.volume['voltype'] = { - 'type': 'distributed-replicated', - 'dist_count': 2, - 'replica_count': 3, - 'arbiter_count': 1, - 'transport': 'tcp'} - - cls.GLUSTERSHD = "/var/lib/glusterd/glustershd/glustershd-server.vol" - - def setUp(self): - """ - setUp method for every test - """ - - # calling GlusterBaseClass setUp - GlusterBaseClass.setUp.im_func(self) - - self.all_mounts_procs = [] - self.io_validation_complete = False - - # Setup Volume and Mount Volume - g.log.info("Starting to Setup Volume %s" % self.volname) - ret = self.setup_volume_and_mount_volume(self.mounts, - volume_create_force=False) - if not ret: - raise ExecutionError("Failed to Setup_Volume and Mount_Volume") - g.log.info("Successful in Setup Volume and Mount 
Volume") - - def tearDown(self): - """ - If test method failed before validating IO, tearDown waits for the - IO's to complete and checks for the IO exit status - - Cleanup and umount volume - """ - # Cleanup and umount volume - g.log.info("Starting to Unmount Volume and Cleanup Volume") - ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts) - if not ret: - raise ExecutionError("Failed to umount the vol & cleanup Volume") - g.log.info("Successful in umounting the volume and Cleanup") - - # Calling GlusterBaseClass teardown - GlusterBaseClass.tearDown.im_func(self) - - def test_impact_of_replace_brick_for_glustershd(self): - nodes = self.volume['servers'] - - # check the self-heal daemon process - g.log.info("Starting to get self-heal daemon process on " - "nodes %s" % nodes) - ret, pids = get_self_heal_daemon_pid(nodes) - self.assertTrue(ret, ("Either No self heal daemon process found or " - "more than One self heal daemon process " - "found : %s" % pids)) - g.log.info("Successful in getting Single self heal daemon process" - " on all nodes %s", nodes) - glustershd_pids = pids - - # get the bricks for the volume - g.log.info("Fetching bricks for the volume : %s" % self.volname) - bricks_list = get_all_bricks(self.mnode, self.volname) - g.log.info("Brick List : %s" % bricks_list) - - # validate the bricks present in volume info with - # glustershd server volume file - g.log.info("Starting parsing file %s on " - "node %s" % (self.GLUSTERSHD, self.mnode)) - ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname, - bricks_list) - self.assertTrue(ret, ("Brick List from volume info is different " - "from glustershd server volume file. " - "Please check log file for details")) - g.log.info("Successfully parsed %s file" % self.GLUSTERSHD) - - # replace brick - brick_to_replace = bricks_list[-1] - new_brick = brick_to_replace + 'new' - g.log.info("Replacing the brick %s for the volume : %s" - % (brick_to_replace, self.volname)) - ret, out, err = replace_brick(self.mnode, self.volname, - brick_to_replace, new_brick) - self.assertFalse(ret, err) - g.log.info('Replaced brick %s to %s successfully' - % (brick_to_replace, new_brick)) - - # check bricks - bricks_list = get_all_bricks(self.mnode, self.volname) - self.assertEqual(bricks_list[-1], new_brick, 'Replaced brick and ' - 'new brick are not equal') - - # Verify volume's all process are online for 60 sec - g.log.info("Verifying volume's all process are online") - ret = wait_for_volume_process_to_be_online(self.mnode, self.volname, - timeout=60) - self.assertTrue(ret, ("Volume %s : All process are not " - "online", self.volname)) - g.log.info("Successfully Verified volume %s processes are online", - self.volname) - - # Verify glustershd process releases its parent process - ret = is_shd_daemonized(nodes) - self.assertTrue(ret, ("Either No self heal daemon process found or " - "more than One self heal daemon process found")) - - # check the self-heal daemon process - g.log.info("Starting to get self-heal daemon process on " - "nodes %s" % nodes) - ret, pids = get_self_heal_daemon_pid(nodes) - self.assertTrue(ret, ("Either No self heal daemon process found or " - "more than One self heal daemon process " - "found : %s" % pids)) - g.log.info("Successful in getting Single self heal daemon process" - " on all nodes %s", nodes) - glustershd_pids_after_replacement = pids - - # Compare pids before and after replacing - self.assertNotEqual(glustershd_pids, - glustershd_pids_after_replacement, - "Self Daemon process is same before and" - " 
after replacing bricks") - g.log.info("Self Heal Daemon Process is different before and " - "after replacing bricks") - - # get the bricks for the volume after replacing - bricks_list_after_replacing = get_all_bricks(self.mnode, self.volname) - g.log.info("Brick List after expanding " - "volume: %s" % bricks_list_after_replacing) - - # validate the bricks present in volume info - # with glustershd server volume file after replacing bricks - g.log.info("Starting parsing file %s" % self.GLUSTERSHD) - ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname, - bricks_list_after_replacing) - - self.assertTrue(ret, ("Brick List from volume info is different " - "from glustershd server volume file after " - "replacing bricks. Please check log file " - "for details")) - g.log.info("Successfully parsed %s file" % self.GLUSTERSHD) -- cgit