Diffstat (limited to 'tests/functional/afr')
-rw-r--r--  tests/functional/afr/test_self_heal_daemon_process.py  159
1 file changed, 159 insertions(+), 0 deletions(-)
diff --git a/tests/functional/afr/test_self_heal_daemon_process.py b/tests/functional/afr/test_self_heal_daemon_process.py
index aba25e572..f3c416687 100644
--- a/tests/functional/afr/test_self_heal_daemon_process.py
+++ b/tests/functional/afr/test_self_heal_daemon_process.py
@@ -32,6 +32,7 @@ from glustolibs.gluster.brick_libs import (get_all_bricks,
bring_bricks_online,
are_bricks_online,
select_bricks_to_bring_offline)
+from glustolibs.gluster.brick_ops import replace_brick
from glustolibs.gluster.heal_libs import (get_self_heal_daemon_pid,
do_bricks_exist_in_shd_volfile,
is_shd_daemonized,
@@ -502,3 +503,161 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
bricks_to_bring_offline)
self.assertTrue(ret, ("Not all bricks are online"))
g.log.info("All bricks are online.")
+
+
+@runs_on([['replicated', 'distributed-replicated'],
+ ['glusterfs', 'nfs', 'cifs']])
+class ImpactOfReplaceBrickForGlustershdTests(GlusterBaseClass):
+ """
+    ImpactOfReplaceBrickForGlustershdTests contains tests which verify
+    the impact of a replace-brick operation on the self-heal daemon
+    (glustershd) process
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ # Calling GlusterBaseClass setUpClass
+ GlusterBaseClass.setUpClass.im_func(cls)
+
+ # Override Volumes
+ if cls.volume_type == "distributed-replicated":
+ # Define distributed-replicated volume
+ cls.volume['voltype'] = {
+ 'type': 'distributed-replicated',
+ 'dist_count': 2,
+ 'replica_count': 3,
+ 'arbiter_count': 1,
+ 'transport': 'tcp'}
+
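+        # Path of the glustershd server volfile generated by glusterd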
+ cls.GLUSTERSHD = "/var/lib/glusterd/glustershd/glustershd-server.vol"
+
+ def setUp(self):
+ """
+ setUp method for every test
+ """
+
+ # calling GlusterBaseClass setUp
+ GlusterBaseClass.setUp.im_func(self)
+
+ self.all_mounts_procs = []
+ self.io_validation_complete = False
+
+ # Setup Volume and Mount Volume
+ g.log.info("Starting to Setup Volume %s" % self.volname)
+ ret = self.setup_volume_and_mount_volume(self.mounts,
+ volume_create_force=False)
+ if not ret:
+ raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
+ g.log.info("Successful in Setup Volume and Mount Volume")
+
+ def tearDown(self):
+ """
+        If the test method failed before validating IO, tearDown waits for
+        the IOs to complete and checks the IO exit status
+
+ Cleanup and umount volume
+ """
+ # Cleanup and umount volume
+ g.log.info("Starting to Unmount Volume and Cleanup Volume")
+ ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to umount the vol & cleanup Volume")
+ g.log.info("Successful in umounting the volume and Cleanup")
+
+ # Calling GlusterBaseClass teardown
+ GlusterBaseClass.tearDown.im_func(self)
+
+ def test_impact_of_replace_brick_for_glustershd(self):
+ nodes = self.volume['servers']
+
+ # check the self-heal daemon process
+ g.log.info("Starting to get self-heal daemon process on "
+ "nodes %s" % nodes)
+ ret, pids = get_self_heal_daemon_pid(nodes)
+ self.assertTrue(ret, ("Either No self heal daemon process found or "
+ "more than One self heal daemon process "
+ "found : %s" % pids))
+ g.log.info("Successful in getting Single self heal daemon process"
+ " on all nodes %s", nodes)
+ glustershd_pids = pids
+
+ # get the bricks for the volume
+ g.log.info("Fetching bricks for the volume : %s" % self.volname)
+ bricks_list = get_all_bricks(self.mnode, self.volname)
+ g.log.info("Brick List : %s" % bricks_list)
+
+ # validate the bricks present in volume info with
+ # glustershd server volume file
+ g.log.info("Starting parsing file %s on "
+ "node %s" % (self.GLUSTERSHD, self.mnode))
+ ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname,
+ bricks_list)
+ self.assertTrue(ret, ("Brick List from volume info is different "
+ "from glustershd server volume file. "
+ "Please check log file for details"))
+ g.log.info("Successfully parsed %s file" % self.GLUSTERSHD)
+
+ # replace brick
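+        # (replace the last brick with a new brick path on the same node)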
+ brick_to_replace = bricks_list[-1]
+ new_brick = brick_to_replace + 'new'
+ g.log.info("Replacing the brick %s for the volume : %s"
+ % (brick_to_replace, self.volname))
+ ret, out, err = replace_brick(self.mnode, self.volname,
+ brick_to_replace, new_brick)
+ self.assertFalse(ret, err)
+ g.log.info('Replaced brick %s to %s successfully'
+ % (brick_to_replace, new_brick))
+
+ # check bricks
+ bricks_list = get_all_bricks(self.mnode, self.volname)
+ self.assertEqual(bricks_list[-1], new_brick, 'Replaced brick and '
+ 'new brick are not equal')
+
+        # Verify all volume processes are online within 60 sec
+        g.log.info("Verifying all volume processes are online")
+ ret = wait_for_volume_process_to_be_online(self.mnode, self.volname,
+ timeout=60)
+        self.assertTrue(ret, ("Volume %s : All processes are not "
+ "online", self.volname))
+ g.log.info("Successfully Verified volume %s processes are online",
+ self.volname)
+
+ # Verify glustershd process releases its parent process
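+        # i.e. wait for glustershd to be daemonized again on all nodes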
+ ret = is_shd_daemonized(nodes)
+ self.assertTrue(ret, ("Either No self heal daemon process found or "
+ "more than One self heal daemon process found"))
+
+ # check the self-heal daemon process
+ g.log.info("Starting to get self-heal daemon process on "
+ "nodes %s" % nodes)
+ ret, pids = get_self_heal_daemon_pid(nodes)
+ self.assertTrue(ret, ("Either No self heal daemon process found or "
+ "more than One self heal daemon process "
+ "found : %s" % pids))
+ g.log.info("Successful in getting Single self heal daemon process"
+ " on all nodes %s", nodes)
+ glustershd_pids_after_replacement = pids
+
+ # Compare pids before and after replacing
+ self.assertNotEqual(glustershd_pids,
+ glustershd_pids_after_replacement,
+                            "Self Heal Daemon process is the same before and"
+ " after replacing bricks")
+ g.log.info("Self Heal Daemon Process is different before and "
+ "after replacing bricks")
+
+ # get the bricks for the volume after replacing
+ bricks_list_after_replacing = get_all_bricks(self.mnode, self.volname)
+        g.log.info("Brick List after replacing "
+                   "brick: %s" % bricks_list_after_replacing)
+
+ # validate the bricks present in volume info
+ # with glustershd server volume file after replacing bricks
+ g.log.info("Starting parsing file %s" % self.GLUSTERSHD)
+ ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname,
+ bricks_list_after_replacing)
+
+ self.assertTrue(ret, ("Brick List from volume info is different "
+ "from glustershd server volume file after "
+ "replacing bricks. Please check log file "
+ "for details"))
+ g.log.info("Successfully parsed %s file" % self.GLUSTERSHD)