Diffstat (limited to 'tests/functional/afr')
-rwxr-xr-x tests/functional/afr/heal/test_self_heal.py | 204
1 file changed, 0 insertions(+), 204 deletions(-)
diff --git a/tests/functional/afr/heal/test_self_heal.py b/tests/functional/afr/heal/test_self_heal.py
index 5eb11ff3b..ab3885740 100755
--- a/tests/functional/afr/heal/test_self_heal.py
+++ b/tests/functional/afr/heal/test_self_heal.py
@@ -404,210 +404,6 @@ class TestSelfHeal(GlusterBaseClass):
)
self.io_validation_complete = True
- def test_data_self_heal_algorithm_diff_heal_command(self):
- """
- Test Volume Option - 'cluster.data-self-heal-algorithm' : 'diff'
-
- Description:
- - set the volume option
- "metadata-self-heal": "off"
- "entry-self-heal": "off"
- "data-self-heal": "off"
- "data-self-heal-algorithm": "diff"
- "self-heal-daemon": "off"
- - create IO
- - calculate arequal
- - bring down all bricks processes from selected set
- - modify the data
- - get arequal before getting bricks online
- - bring bricks online
- - expand volume by adding bricks to the volume
- - do rebalance
- - set the volume option "self-heal-daemon": "on" and check for daemons
- - start healing
- - check if heal is completed
- - check for split-brain
- - calculate arequal after heal and compare it with the arequal taken
- before bringing the bricks online
- """
- # pylint: disable=too-many-branches,too-many-statements
- # Setting options
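- # Client-side heals stay disabled for the whole test so that the
- # 'diff' algorithm is exercised only by the heal triggered
- # explicitly near the end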
- g.log.info('Setting options...')
- options = {"metadata-self-heal": "off",
- "entry-self-heal": "off",
- "data-self-heal": "off",
- "data-self-heal-algorithm": "diff"}
- ret = set_volume_options(self.mnode, self.volname, options)
- self.assertTrue(ret, 'Failed to set options')
- g.log.info("Options "
- "'metadata-self-heal', "
- "'entry-self-heal', "
- "'data-self-heal', "
- "'self-heal-daemon' "
- "are set to 'off',"
- "'data-self-heal-algorithm' "
- "is set to 'diff' successfully")
-
- # Creating files on client side
- for mount_object in self.mounts:
- g.log.info("Generating data for %s:%s",
- mount_object.client_system, mount_object.mountpoint)
- # Creating files
- command = ("python %s create_files -f 100 %s"
- % (self.script_upload_path, mount_object.mountpoint))
-
- proc = g.run_async(mount_object.client_system, command,
- user=mount_object.user)
- self.all_mounts_procs.append(proc)
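- # g.run_async returns a handle immediately; the handles are
- # collected so validate_io_procs below can wait on and check
- # the IO of every client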
-
- # Validate IO
- self.assertTrue(
- validate_io_procs(self.all_mounts_procs, self.mounts),
- "IO failed on some of the clients"
- )
- self.io_validation_complete = True
-
- # Setting options
- g.log.info('Setting options...')
- options = {"self-heal-daemon": "off"}
- ret = set_volume_options(self.mnode, self.volname, options)
- self.assertTrue(ret, 'Failed to set options')
- g.log.info("Option 'self-heal-daemon' is set to 'off' successfully")
-
- # Select bricks to bring offline
- bricks_to_bring_offline_dict = (select_bricks_to_bring_offline(
- self.mnode, self.volname))
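- # for non-tiered volumes the tier brick lists are empty;
- # filter(None, ...) drops any None entries, and list() makes the
- # result reusable on Python 3, where filter() is a one-shot iterator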
- bricks_to_bring_offline = list(filter(None, (
- bricks_to_bring_offline_dict['hot_tier_bricks'] +
- bricks_to_bring_offline_dict['cold_tier_bricks'] +
- bricks_to_bring_offline_dict['volume_bricks'])))
-
- # Bring bricks offline
- g.log.info('Bringing bricks %s offline...', bricks_to_bring_offline)
- ret = bring_bricks_offline(self.volname, bricks_to_bring_offline)
- self.assertTrue(ret, 'Failed to bring bricks %s offline' %
- bricks_to_bring_offline)
-
- ret = are_bricks_offline(self.mnode, self.volname,
- bricks_to_bring_offline)
- self.assertTrue(ret, 'Bricks %s are not offline'
- % bricks_to_bring_offline)
- g.log.info('Bringing bricks %s offline is successful',
- bricks_to_bring_offline)
-
- # Modify the data
- self.all_mounts_procs = []
- for mount_object in self.mounts:
- g.log.info("Modifying data for %s:%s",
- mount_object.client_system, mount_object.mountpoint)
- command = ("python %s create_files -f 100 --fixed-file-size 1M %s"
- % (self.script_upload_path, mount_object.mountpoint))
-
- proc = g.run_async(mount_object.client_system, command,
- user=mount_object.user)
- self.all_mounts_procs.append(proc)
-
- # Validate IO
- self.assertTrue(
- validate_io_procs(self.all_mounts_procs, self.mounts),
- "IO failed on some of the clients"
- )
- self.io_validation_complete = True
-
- # Get arequal before getting bricks online
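- # arequal checksums all file data and metadata under the mount; the
- # value taken now (bricks still offline) is compared after heal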
- g.log.info('Getting arequal before getting bricks online...')
- ret, result_before_online = collect_mounts_arequal(self.mounts)
- self.assertTrue(ret, 'Failed to get arequal')
- g.log.info('Getting arequal before getting bricks online '
- 'is successful')
-
- # Bring bricks online
- g.log.info('Bringing bricks %s online...', bricks_to_bring_offline)
- ret = bring_bricks_online(self.mnode, self.volname,
- bricks_to_bring_offline)
- self.assertTrue(ret, 'Failed to bring bricks %s online' %
- bricks_to_bring_offline)
- g.log.info('Bringing bricks %s online is successful',
- bricks_to_bring_offline)
-
- # Wait for volume processes to be online
- g.log.info("Wait for volume processes to be online")
- ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
- self.assertTrue(ret, ("Failed to wait for volume %s processes to "
- "be online", self.volname))
- g.log.info("Successful in waiting for volume %s processes to be "
- "online", self.volname)
-
- # Verify all processes of the volume are online
- g.log.info("Verifying all processes of the volume are online")
- ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
- self.assertTrue(ret, ("Volume %s : all processes are not online"
- % self.volname))
- g.log.info("Volume %s : all processes are online", self.volname)
-
- # Expand volume by adding bricks to the volume
- g.log.info("Start adding bricks to volume...")
- ret = expand_volume(self.mnode, self.volname, self.servers,
- self.all_servers_info)
- self.assertTrue(ret, ("Failed to expand the volume when IO in "
- "progress on volume %s", self.volname))
- g.log.info("Expanding volume is successful on volume %s", self.volname)
- self.bricks_list = get_all_bricks(self.mnode, self.volname)
-
- # Do rebalance
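- # rebalance spreads the existing files across the bricks just added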
- ret, _, _ = rebalance_start(self.mnode, self.volname)
- self.assertEqual(ret, 0, 'Failed to start rebalance')
- g.log.info('Rebalance is started')
-
- ret = wait_for_rebalance_to_complete(self.mnode, self.volname)
- self.assertTrue(ret, 'Rebalance is not completed')
- g.log.info('Rebalance is completed successfully')
-
- # Setting options
- g.log.info('Setting options...')
- options = {"self-heal-daemon": "on"}
- ret = set_volume_options(self.mnode, self.volname, options)
- self.assertTrue(ret, 'Failed to set options')
- g.log.info("Option 'self-heal-daemon' is set to 'on' successfully")
-
- # Wait for self-heal-daemons to be online
- g.log.info("Waiting for self-heal-daemons to be online")
- ret = is_shd_daemonized(self.all_servers)
- self.assertTrue(ret, "Either No self heal daemon process found")
- g.log.info("All self-heal-daemons are online")
-
- # Start healing
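- # with client-side heals disabled, only the freshly enabled daemons
- # can repair the data written while the bricks were down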
- ret = trigger_heal(self.mnode, self.volname)
- self.assertTrue(ret, 'Heal is not started')
- g.log.info('Healing is started')
-
- # Monitor heal completion
- ret = monitor_heal_completion(self.mnode, self.volname)
- self.assertTrue(ret, 'Heal has not yet completed')
-
- # Check if heal is completed
- ret = is_heal_complete(self.mnode, self.volname)
- self.assertTrue(ret, 'Heal is not complete')
- g.log.info('Heal is completed successfully')
-
- # Check for split-brain
- ret = is_volume_in_split_brain(self.mnode, self.volname)
- self.assertFalse(ret, 'Volume is in split-brain state')
- g.log.info('Volume is not in split-brain state')
-
- # Get arequal after getting bricks online
- g.log.info('Getting arequal after getting bricks online...')
- ret, result_after_online = collect_mounts_arequal(self.mounts)
- self.assertTrue(ret, 'Failed to get arequal')
- g.log.info('Getting arequal after getting bricks online '
- 'is successful')
-
- # Compare the arequal checksums taken before bringing the bricks
- # online and after heal completion
- self.assertItemsEqual(result_before_online, result_after_online,
- 'Checksums are not equal')
- g.log.info('Checksums are equal')
-
def test_data_self_heal_algorithm_diff_default(self):
"""
Test Volume Option - 'cluster.data-self-heal-algorithm' : 'diff'