path: root/tests/functional/afr/heal/test_self_heal_daemon_process.py
Diffstat (limited to 'tests/functional/afr/heal/test_self_heal_daemon_process.py')
-rwxr-xr-x  tests/functional/afr/heal/test_self_heal_daemon_process.py | 41
1 file changed, 21 insertions(+), 20 deletions(-)
diff --git a/tests/functional/afr/heal/test_self_heal_daemon_process.py b/tests/functional/afr/heal/test_self_heal_daemon_process.py
index edb4575eb..ea598b1fc 100755
--- a/tests/functional/afr/heal/test_self_heal_daemon_process.py
+++ b/tests/functional/afr/heal/test_self_heal_daemon_process.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2016-2017 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2016-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -18,9 +18,11 @@
Test Cases in this module tests the self heal daemon process.
"""
-import time
import calendar
+import time
+
from glusto.core import Glusto as g
+
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.volume_libs import (
@@ -37,7 +39,8 @@ from glustolibs.gluster.heal_libs import (get_self_heal_daemon_pid,
is_shd_daemonized,
are_all_self_heal_daemons_are_online)
from glustolibs.gluster.volume_ops import (volume_stop, volume_start)
-from glustolibs.gluster.gluster_init import restart_glusterd
+from glustolibs.gluster.gluster_init import (
+ restart_glusterd, wait_for_glusterd_to_start)
from glustolibs.io.utils import validate_io_procs
from glustolibs.misc.misc_libs import upload_scripts
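The regrouped imports above follow the usual stdlib / third-party / project split, and the new wait_for_glusterd_to_start import is consumed in a later hunk: restart_glusterd only issues the restart command, so a check that runs immediately afterwards can race the daemon coming back up, and the helper blocks until every node reports glusterd running. A minimal sketch of the polling idea behind such a helper (an assumption for illustration, not the library's implementation), built on glusto's real g.run, which returns an (rcode, stdout, stderr) tuple:

    import time

    from glusto.core import Glusto as g

    def wait_for_glusterd(servers, timeout=120):
        """Poll until systemd reports glusterd active on every server,
        or give up after `timeout` seconds."""
        deadline = time.time() + timeout
        pending = set(servers)
        while pending and time.time() < deadline:
            for host in list(pending):
                ret, _, _ = g.run(host, "systemctl is-active glusterd")
                if ret == 0:
                    pending.discard(host)
            if pending:
                time.sleep(2)
        return not pending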
@@ -55,16 +58,14 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
@classmethod
def setUpClass(cls):
# Calling GlusterBaseClass setUpClass
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# Upload io scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on mounts",
cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, [script_local_path])
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts to clients %s"
% cls.clients)
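This hunk is part of the tree-wide Python 3 port. im_func is a Python 2 attribute of unbound methods and is gone in Python 3, so an explicit base-class call such as GlusterBaseClass.setUpClass.im_func(cls) raises AttributeError there; the calls are routed through the get_super_method helper instead, and the same substitution is applied to setUp and tearDown below. The upload_scripts change in the same hunk drops the redundant script_local_path variable and passes the upload path directly. A minimal sketch of what a get_super_method-style helper can look like (a reconstruction for illustration; the real helper lives in glustolibs.gluster.gluster_base_class and may differ):

    class Base(object):
        @staticmethod
        def get_super_method(obj, method_name):
            # Works for class objects (setUpClass) and instances (setUp,
            # tearDown): resolve the name on the next class in the MRO.
            klass = obj if isinstance(obj, type) else obj.__class__
            return getattr(super(klass, obj), method_name)

        @classmethod
        def setUpClass(cls):
            print("Base.setUpClass for %s" % cls.__name__)

    class Child(Base):
        @classmethod
        def setUpClass(cls):
            # Py2-only spelling was: Base.setUpClass.im_func(cls)
            cls.get_super_method(cls, 'setUpClass')()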
@@ -78,7 +79,7 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
"""
# calling GlusterBaseClass setUpClass
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
self.all_mounts_procs = []
self.io_validation_complete = False
@@ -103,7 +104,6 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
"""
Clean up the volume and umount volume from client
"""
-
# stopping the volume
g.log.info("Starting to Unmount Volume and Cleanup Volume")
ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
@@ -112,7 +112,7 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
g.log.info("Successful in Unmount Volume and Cleanup Volume")
# calling GlusterBaseClass tearDownClass
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_glustershd_with_add_remove_brick(self):
"""
@@ -356,6 +356,10 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
g.log.info("Successfully restarted glusterd on all nodes %s",
nodes)
+ self.assertTrue(
+ wait_for_glusterd_to_start(self.servers),
+ "Failed to start glusterd on %s" % self.servers)
+
# check the self heal daemon process after restarting glusterd process
g.log.info("Starting to get self-heal daemon process on"
" nodes %s", nodes)
@@ -445,10 +449,7 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
# select bricks to bring offline
bricks_to_bring_offline_dict = (select_bricks_to_bring_offline(
self.mnode, self.volname))
- bricks_to_bring_offline = filter(None, (
- bricks_to_bring_offline_dict['hot_tier_bricks'] +
- bricks_to_bring_offline_dict['cold_tier_bricks'] +
- bricks_to_bring_offline_dict['volume_bricks']))
+ bricks_to_bring_offline = bricks_to_bring_offline_dict['volume_bricks']
# bring bricks offline
g.log.info("Going to bring down the brick process "
@@ -529,10 +530,7 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
# Select bricks to bring offline
bricks_to_bring_offline_dict = (select_bricks_to_bring_offline(
self.mnode, self.volname))
- bricks_to_bring_offline = filter(None, (
- bricks_to_bring_offline_dict['hot_tier_bricks'] +
- bricks_to_bring_offline_dict['cold_tier_bricks'] +
- bricks_to_bring_offline_dict['volume_bricks']))
+ bricks_to_bring_offline = bricks_to_bring_offline_dict['volume_bricks']
# Bring brick offline
g.log.info('Bringing bricks %s offline...', bricks_to_bring_offline)
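The same simplification appears in both of the last two hunks, and it folds two fixes into one line. On Python 3, filter() returns a lazy one-shot iterator instead of a list, so a result that is indexed or iterated more than once silently misbehaves; and with tiering gone from current GlusterFS the hot/cold tier keys no longer contribute bricks (an assumption based on the change itself), leaving 'volume_bricks' as the only list worth reading. A small demonstration of the Python 3 pitfall:

    bricks = {'hot_tier_bricks': [], 'cold_tier_bricks': [],
              'volume_bricks': ['n1:/b1', 'n2:/b2']}
    offline = filter(None, bricks['hot_tier_bricks'] +
                     bricks['cold_tier_bricks'] +
                     bricks['volume_bricks'])
    print(list(offline))  # ['n1:/b1', 'n2:/b2']
    print(list(offline))  # [] on Python 3: the iterator is already drained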
@@ -548,11 +546,14 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
bricks_to_bring_offline)
# Creating files for all volumes
+ self.all_mounts_procs = []
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s",
mount_obj.client_system, mount_obj.mountpoint)
- cmd = ("python %s create_files -f 100 %s/test_dir"
- % (self.script_upload_path, mount_obj.mountpoint))
+ cmd = ("/usr/bin/env python %s create_files -f 100 "
+ "%s/test_dir" % (
+ self.script_upload_path,
+ mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
self.all_mounts_procs.append(proc)
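Two details in this last hunk: self.all_mounts_procs is re-initialised so the validation step only reaps handles created by this loop, and /usr/bin/env python resolves whichever interpreter the client ships instead of hard-coding a python binary path. The collected handles are typically reaped with the validate_io_procs helper already imported in this file; an illustrative follow-up in the pattern used across glusto-tests (the assertion text is mine, not this file's):

    self.assertTrue(
        validate_io_procs(self.all_mounts_procs, self.mounts),
        "IO failed on some of the clients")
    self.io_validation_complete = True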