author    nchilaka <nchilaka@redhat.com>    2020-07-22 11:46:32 +0530
committer    Bala Konda Reddy M <bala12352@gmail.com>    2020-07-27 05:39:04 +0000
commit    6b8f2f0c23a1d5939bdf32dfa70d4bb7deaf7ba7 (patch)
tree    b509d40e808066e8f0b93d0242c3e6d9c6705565 /tests/functional/afr
parent    989dbc665b721258cefce1fc2736076696c1cbfc (diff)
[TestFix] AFR Data heal by default and explicit self-heal command
- Remove unnecessary disablement of client side heal options
- Check if client side heal options are disabled by default
- Test data heal by default method
- Explicit data heal by calling self heal command

Change-Id: I3be9001fc1cf124a4cf5a290cee985e166c0b685
Signed-off-by: nchilaka <nchilaka@redhat.com>
Diffstat (limited to 'tests/functional/afr')
-rwxr-xr-x    tests/functional/afr/heal/test_self_heal.py    155
1 file changed, 29 insertions, 126 deletions
diff --git a/tests/functional/afr/heal/test_self_heal.py b/tests/functional/afr/heal/test_self_heal.py
index 3363b54..4fb6dea 100755
--- a/tests/functional/afr/heal/test_self_heal.py
+++ b/tests/functional/afr/heal/test_self_heal.py
@@ -16,14 +16,12 @@
# pylint: disable=too-many-lines
from glusto.core import Glusto as g
-
from glustolibs.gluster.gluster_base_class import (GlusterBaseClass, runs_on)
from glustolibs.gluster.exceptions import ExecutionError
-from glustolibs.gluster.volume_ops import set_volume_options
+from glustolibs.gluster.volume_ops import get_volume_options
from glustolibs.gluster.volume_libs import (
verify_all_process_of_volume_are_online,
wait_for_volume_process_to_be_online)
-from glustolibs.gluster.volume_libs import expand_volume
from glustolibs.gluster.brick_libs import (select_bricks_to_bring_offline,
bring_bricks_offline,
bring_bricks_online,
@@ -34,8 +32,6 @@ from glustolibs.gluster.heal_libs import (
is_heal_complete,
is_volume_in_split_brain,
is_shd_daemonized)
-from glustolibs.gluster.rebalance_ops import (rebalance_start,
- wait_for_rebalance_to_complete)
from glustolibs.gluster.heal_ops import trigger_heal
from glustolibs.misc.misc_libs import upload_scripts
from glustolibs.io.utils import (collect_mounts_arequal, validate_io_procs,
@@ -43,12 +39,12 @@ from glustolibs.io.utils import (collect_mounts_arequal, validate_io_procs,
@runs_on([['replicated', 'distributed-replicated'],
- ['glusterfs', 'cifs', 'nfs']])
+ ['glusterfs', 'cifs']])
class TestSelfHeal(GlusterBaseClass):
"""
Description:
- Arbiter Test cases related to
- healing in default configuration of the volume
+ AFR Test cases related to healing in
+ default configuration of the volume
"""
@classmethod
@@ -121,12 +117,15 @@ class TestSelfHeal(GlusterBaseClass):
# Calling GlusterBaseClass teardown
self.get_super_method(self, 'tearDown')()
- def test_data_self_heal_daemon_off(self):
+ def test_data_self_heal_command(self):
"""
Test Data-Self-Heal (heal command)
Description:
- - set the volume option
+ - get the client side healing volume options and check
+ if they have already been disabled by default
+ NOTE: Client side healing has been disabled by default
+ since GlusterFS 6.0
"metadata-self-heal": "off"
"entry-self-heal": "off"
"data-self-heal": "off"
@@ -135,7 +134,7 @@ class TestSelfHeal(GlusterBaseClass):
- set the volume option
"self-heal-daemon": "off"
- bring down all bricks processes from selected set
- - Get areeual after getting bricks offline and compare with
+ - Get arequal after getting bricks offline and compare with
arequal before getting bricks offline
- modify the data
- bring bricks online
@@ -144,8 +143,6 @@ class TestSelfHeal(GlusterBaseClass):
- check daemons and start healing
- check if heal is completed
- check for split-brain
- - add bricks
- - do rebalance
- create 5k files
- while creating files - kill bricks and bring bricks online one by one
in cycle
@@ -153,15 +150,16 @@ class TestSelfHeal(GlusterBaseClass):
"""
# pylint: disable=too-many-statements
- # Setting options
- g.log.info('Setting options...')
- options = {"metadata-self-heal": "off",
- "entry-self-heal": "off",
- "data-self-heal": "off"}
- ret = set_volume_options(self.mnode, self.volname, options)
- self.assertTrue(ret, 'Failed to set options %s' % options)
- g.log.info("Successfully set %s for volume %s",
- options, self.volname)
+ # Checking if Client side healing options are disabled by default
+ g.log.info('Checking Client side healing is disabled by default')
+ options = ('cluster.metadata-self-heal', 'cluster.data-self-heal',
+ 'cluster.entry-self-heal')
+ for option in options:
+ ret = get_volume_options(self.mnode, self.volname, option)[option]
+            self.assertIn(ret, ('off', 'off (DEFAULT)'),
+                          "{} option is not disabled by default"
+                          .format(option))
+ g.log.info("Client side healing options are disabled by default")
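For readers following the patch, here is a minimal standalone sketch of the check it introduces, assuming a reachable management node `mnode` and an existing volume name; `client_side_heal_disabled` is a hypothetical helper, and 'off (DEFAULT)' is how newer GlusterFS releases report an untouched default, as the patch itself accounts for:

    from glustolibs.gluster.volume_ops import get_volume_options

    def client_side_heal_disabled(mnode, volname):
        """Hypothetical helper: True if all client-side heal options are off."""
        options = ('cluster.metadata-self-heal', 'cluster.data-self-heal',
                   'cluster.entry-self-heal')
        for option in options:
            # get_volume_options returns {option: value}; newer GlusterFS
            # reports untouched defaults as 'off (DEFAULT)'.
            value = get_volume_options(mnode, volname, option)[option]
            if value not in ('off', 'off (DEFAULT)'):
                return False
        return True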
# Creating files on client side
for mount_obj in self.mounts:
@@ -193,13 +191,6 @@ class TestSelfHeal(GlusterBaseClass):
g.log.info('Getting arequal before getting bricks offline '
'is successful')
- # Setting options
- g.log.info('Setting options...')
- options = {"self-heal-daemon": "off"}
- ret = set_volume_options(self.mnode, self.volname, options)
- self.assertTrue(ret, 'Failed to set options %s' % options)
- g.log.info("Option 'self-heal-daemon' is set to 'off' successfully")
-
# Select bricks to bring offline
bricks_to_bring_offline_dict = (select_bricks_to_bring_offline(
self.mnode, self.volname))
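The surrounding hunks lean on glustolibs' brick fault-injection helpers. A minimal sketch of that offline/online cycle follows; the 'volume_bricks' key and the exact bring_bricks_offline/bring_bricks_online signatures are assumptions based on common glusto-tests usage, and `cycle_bricks` is a hypothetical helper:

    from glustolibs.gluster.brick_libs import (select_bricks_to_bring_offline,
                                               bring_bricks_offline,
                                               bring_bricks_online)

    def cycle_bricks(mnode, volname):
        # select_bricks_to_bring_offline returns a dict of brick lists;
        # the 'volume_bricks' key is assumed from common glusto-tests usage.
        offline_dict = select_bricks_to_bring_offline(mnode, volname)
        bricks = offline_dict['volume_bricks']
        # bring_bricks_offline kills brick processes on their own hosts,
        # hence no mnode argument (signature assumed from glusto-tests).
        if not bring_bricks_offline(volname, bricks):
            raise RuntimeError('Failed to bring bricks offline')
        # ... run I/O and collect arequal while the bricks are down ...
        if not bring_bricks_online(mnode, volname, bricks):
            raise RuntimeError('Failed to bring bricks online')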
@@ -266,13 +257,6 @@ class TestSelfHeal(GlusterBaseClass):
g.log.info('Bringing bricks %s online is successful',
bricks_to_bring_offline)
- # Setting options
- g.log.info('Setting options...')
- options = {"self-heal-daemon": "on"}
- ret = set_volume_options(self.mnode, self.volname, options)
- self.assertTrue(ret, 'Failed to set options %s' % options)
- g.log.info("Option 'self-heal-daemon' is set to 'on' successfully")
-
# Wait for volume processes to be online
g.log.info("Wait for volume processes to be online")
ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
@@ -281,7 +265,7 @@ class TestSelfHeal(GlusterBaseClass):
g.log.info("Successful in waiting for volume %s processes to be "
"online", self.volname)
- # Verify volume's all process are online
+ # Verify volume's all processes are online
g.log.info("Verifying volume's all process are online")
ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
self.assertTrue(ret, ("Volume %s : All process are not online"
@@ -313,23 +297,6 @@ class TestSelfHeal(GlusterBaseClass):
self.assertFalse(ret, 'Volume is in split-brain state')
g.log.info('Volume is not in split-brain state')
- # Add bricks
- g.log.info("Start adding bricks to volume...")
- ret = expand_volume(self.mnode, self.volname, self.servers,
- self.all_servers_info)
- self.assertTrue(ret, ("Failed to expand the volume %s", self.volname))
- g.log.info("Expanding volume is successful on "
- "volume %s", self.volname)
-
- # Do rebalance
- ret, _, _ = rebalance_start(self.mnode, self.volname)
- self.assertEqual(ret, 0, 'Failed to start rebalance')
- g.log.info('Rebalance is started')
-
- ret = wait_for_rebalance_to_complete(self.mnode, self.volname)
- self.assertTrue(ret, 'Rebalance is not completed')
- g.log.info('Rebalance is completed successfully')
-
# Create 1k files
self.all_mounts_procs = []
for mount_obj in self.mounts:
@@ -402,42 +369,21 @@ class TestSelfHeal(GlusterBaseClass):
)
self.io_validation_complete = True
- def test_self_heal_50k_files_heal_command_by_add_brick(self):
+ def test_self_heal_50k_files_heal_default(self):
"""
- Test self-heal of 50k files (heal command
+ Test self-heal of 50k files by heal default
Description:
- - set the volume option
- "metadata-self-heal": "off"
- "entry-self-heal": "off"
- "data-self-heal": "off"
- "self-heal-daemon": "off"
- bring down all bricks processes from selected set
- create IO (50k files)
- Get arequal before getting bricks online
- - bring bricks online
- - set the volume option
- "self-heal-daemon": "on"
- - check for daemons
- - start healing
+ - check for daemons to come online
+ - heal daemon should pick up entries to heal automatically
- check if heal is completed
- check for split-brain
- get arequal after getting bricks online and compare with
arequal before getting bricks online
- - add bricks
- - do rebalance
- - get arequal after adding bricks and compare with
- arequal after getting bricks online
"""
# pylint: disable=too-many-locals,too-many-statements
- # Setting options
- g.log.info('Setting options...')
- options = {"metadata-self-heal": "off",
- "entry-self-heal": "off",
- "data-self-heal": "off",
- "self-heal-daemon": "off"}
- ret = set_volume_options(self.mnode, self.volname, options)
- self.assertTrue(ret, 'Failed to set options')
- g.log.info("Successfully set %s for volume %s", options, self.volname)
# Select bricks to bring offline
bricks_to_bring_offline_dict = (select_bricks_to_bring_offline(
@@ -494,13 +440,6 @@ class TestSelfHeal(GlusterBaseClass):
g.log.info('Bringing bricks %s online is successful',
bricks_to_bring_offline)
- # Setting options
- g.log.info('Setting options...')
- options = {"self-heal-daemon": "on"}
- ret = set_volume_options(self.mnode, self.volname, options)
- self.assertTrue(ret, 'Failed to set options %s' % options)
- g.log.info("Option 'self-heal-daemon' is set to 'on' successfully")
-
# Wait for volume processes to be online
g.log.info("Wait for volume processes to be online")
ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
@@ -522,11 +461,7 @@ class TestSelfHeal(GlusterBaseClass):
self.assertTrue(ret, "Either No self heal daemon process found")
g.log.info("All self-heal-daemons are online")
- # Start healing
- ret = trigger_heal(self.mnode, self.volname)
- self.assertTrue(ret, 'Heal is not started')
- g.log.info('Healing is started')
-
+ # Default Heal testing, wait for shd to pick up healing
# Monitor heal completion
ret = monitor_heal_completion(self.mnode, self.volname,
timeout_period=3600)
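Since the explicit trigger_heal() call is removed, the test now relies on shd picking up the heal backlog on its own. A minimal sketch of that default-heal wait, assuming is_shd_daemonized takes a list of server nodes; `wait_for_default_heal` is a hypothetical helper:

    from glustolibs.gluster.heal_libs import (monitor_heal_completion,
                                              is_heal_complete,
                                              is_volume_in_split_brain,
                                              is_shd_daemonized)

    def wait_for_default_heal(mnode, volname, nodes):
        # is_shd_daemonized takes the list of server nodes (assumed signature).
        if not is_shd_daemonized(nodes):
            raise RuntimeError('Self-heal daemon is not running on all nodes')
        # No trigger_heal() here: shd is expected to pick up the entries
        # on its own, which is what "heal by default" means in this patch.
        if not monitor_heal_completion(mnode, volname, timeout_period=3600):
            raise RuntimeError('Heal has not completed in time')
        if not is_heal_complete(mnode, volname):
            raise RuntimeError('Heal is not complete')
        if is_volume_in_split_brain(mnode, volname):
            raise RuntimeError('Volume is in split-brain state')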
@@ -551,40 +486,8 @@ class TestSelfHeal(GlusterBaseClass):
# Checking arequals before bringing bricks online
# and after bringing bricks online
- self.assertItemsEqual(result_before_online, result_after_online,
- 'Checksums before and '
- 'after bringing bricks online are not equal')
+ self.assertEqual(result_before_online, result_after_online,
+ 'Checksums before and after bringing bricks online '
+ 'are not equal')
g.log.info('Checksums before and after bringing bricks online '
'are equal')
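The swap from assertItemsEqual to assertEqual matters for Python 3, where assertItemsEqual no longer exists (its closest successor is assertCountEqual); since the per-mount checksum lists are order-aligned, plain equality is the right comparison. A minimal sketch of the arequal round-trip, assuming collect_mounts_arequal returns a (status, checksum-list) tuple as used above; `assert_arequal_unchanged` is a hypothetical helper:

    from glustolibs.io.utils import collect_mounts_arequal

    def assert_arequal_unchanged(mounts):
        # collect_mounts_arequal returns (status, [checksum per mount]).
        ret, before = collect_mounts_arequal(mounts)
        if not ret:
            raise RuntimeError('Failed to get arequal before the fault')
        # ... bring bricks down, modify data, heal, bring bricks online ...
        ret, after = collect_mounts_arequal(mounts)
        if not ret:
            raise RuntimeError('Failed to get arequal after healing')
        # Plain equality: the lists are ordered per mount, so == suffices.
        if before != after:
            raise RuntimeError('Checksums before and after are not equal')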
-
- # Add bricks
- g.log.info("Start adding bricks to volume...")
- ret = expand_volume(self.mnode, self.volname, self.servers,
- self.all_servers_info)
- self.assertTrue(ret, ("Failed to expand the volume when IO in "
- "progress on volume %s", self.volname))
- g.log.info("Expanding volume is successful on volume %s", self.volname)
-
- # Do rebalance
- ret, _, _ = rebalance_start(self.mnode, self.volname)
- self.assertEqual(ret, 0, 'Failed to start rebalance')
- g.log.info('Rebalance is started')
-
- ret = wait_for_rebalance_to_complete(self.mnode, self.volname)
- self.assertTrue(ret, 'Rebalance is not completed')
- g.log.info('Rebalance is completed successfully')
-
- # Get arequal after adding bricks
- g.log.info('Getting arequal after adding bricks...')
- ret, result_after_adding_bricks = collect_mounts_arequal(self.mounts)
- self.assertTrue(ret, 'Failed to get arequal')
- g.log.info('Getting arequal after getting bricks '
- 'is successful')
-
- # Checking arequals after bringing bricks online
- # and after adding bricks
- self.assertItemsEqual(result_after_online, result_after_adding_bricks,
- 'Checksums after bringing bricks online and '
- 'after adding bricks are not equal')
- g.log.info('Checksums after bringing bricks online and '
- 'after adding bricks are equal')