Diffstat (limited to 'tests/functional/glusterd/test_restart_glusterd_while_rebalancing.py')
-rw-r--r-- tests/functional/glusterd/test_restart_glusterd_while_rebalancing.py | 53
1 file changed, 25 insertions(+), 28 deletions(-)
diff --git a/tests/functional/glusterd/test_restart_glusterd_while_rebalancing.py b/tests/functional/glusterd/test_restart_glusterd_while_rebalancing.py
index c1288f9fe..f4f082311 100644
--- a/tests/functional/glusterd/test_restart_glusterd_while_rebalancing.py
+++ b/tests/functional/glusterd/test_restart_glusterd_while_rebalancing.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2018-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -18,8 +18,9 @@
Test restart glusterd while rebalance is in progress
"""
-from time import sleep
+
from glusto.core import Glusto as g
+
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.volume_libs import form_bricks_list_to_add_brick
@@ -27,7 +28,10 @@ from glustolibs.gluster.brick_ops import add_brick
from glustolibs.gluster.rebalance_ops import (rebalance_start,
get_rebalance_status)
from glustolibs.gluster.gluster_init import (restart_glusterd,
- is_glusterd_running)
+ wait_for_glusterd_to_start,
+ is_glusterd_running,
+ start_glusterd)
+from glustolibs.gluster.peer_ops import wait_for_peers_to_connect
from glustolibs.io.utils import validate_io_procs
from glustolibs.misc.misc_libs import upload_scripts
from glustolibs.gluster.glusterdir import get_dir_contents
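
The dropped "from time import sleep" import is the tell for the rest of this patch: both hand-rolled polling loops further down are replaced by glustolibs waiters. A minimal sketch of the poll-until-true pattern those waiters encapsulate; the helper name, timeout, and interval below are assumptions for illustration, not the library's actual signatures:

    from time import sleep

    def wait_until(check, timeout=160, interval=2):
        # Poll check() every `interval` seconds until it returns truthy
        # or `timeout` seconds elapse.
        elapsed = 0
        while elapsed < timeout:
            if check():
                return True
            sleep(interval)
            elapsed += interval
        return False

The removed tearDown loop below (80 iterations with 2-second sleeps) is exactly this shape with check bound to self.validate_peers_are_connected.
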
@@ -40,16 +44,14 @@ class TestRestartGlusterdWhileRebalance(GlusterBaseClass):
@classmethod
def setUpClass(cls):
cls.counter = 1
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# Uploading file_dir script in all client directories
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts to clients %s"
% cls.clients)
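
The im_func idiom replaced above is Python 2 only: unbound methods there expose the underlying function as .im_func, while Python 3 has plain functions and raises AttributeError on that access. A self-contained illustration of the portable spelling that get_super_method resolves to (this is an equivalent call, not the glustolibs implementation):

    class Parent(object):
        @classmethod
        def setUpClass(cls):
            print("setUpClass running for", cls.__name__)

    class Child(Parent):
        @classmethod
        def setUpClass(cls):
            # Python 2 only; AttributeError on Python 3:
            #   Parent.setUpClass.im_func(cls)
            # Portable on both, which is what the helper provides:
            super(Child, cls).setUpClass()

    Child.setUpClass()   # prints: setUpClass running for Child
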
@@ -83,24 +85,25 @@ class TestRestartGlusterdWhileRebalance(GlusterBaseClass):
self.volname)
# calling GlusterBaseClass setUp
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
def tearDown(self):
"""
tearDown for every test
"""
+ ret = is_glusterd_running(self.servers)
+ if ret:
+ ret = start_glusterd(self.servers)
+ if not ret:
+ raise ExecutionError("Failed to start glusterd on %s"
+ % self.servers)
+ g.log.info("Glusterd started successfully on %s", self.servers)
# checking for peer status from every node
- count = 0
- while count < 80:
- ret = self.validate_peers_are_connected()
- if ret:
- break
- sleep(2)
- count += 1
-
- if not ret:
- raise ExecutionError("Servers are not in peer probed state")
+ for server in self.servers:
+ ret = wait_for_peers_to_connect(server, self.servers)
+ if not ret:
+ raise ExecutionError("Servers are not in peer probed state")
# unmounting the volume and Cleaning up the volume
ret = self.unmount_volume_and_cleanup_volume(self.mounts)
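
One trap for readers of this hunk: the two glusterd helpers use opposite return conventions, which the bare "if ret:" obscures. The sketch below spells the conventions out; they are inferred from how this hunk uses the calls, not verified against glustolibs:

    from glustolibs.gluster.exceptions import ExecutionError
    from glustolibs.gluster.gluster_init import (is_glusterd_running,
                                                 start_glusterd)

    def ensure_glusterd_up(servers):
        # is_glusterd_running() is treated like an exit status here:
        # 0 when glusterd is active on every server, nonzero otherwise.
        if is_glusterd_running(servers):
            # start_glusterd(), by contrast, returns True on success.
            if not start_glusterd(servers):
                raise ExecutionError("Failed to start glusterd on %s"
                                     % servers)
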
@@ -127,7 +130,7 @@ class TestRestartGlusterdWhileRebalance(GlusterBaseClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 4 "
"--dir-length 6 "
@@ -176,13 +179,7 @@ class TestRestartGlusterdWhileRebalance(GlusterBaseClass):
g.log.info("Glusterd restarted successfully on %s", self.servers)
# Checking glusterd status
- count = 0
- while count < 60:
- ret = is_glusterd_running(self.servers)
- if not ret:
- break
- sleep(2)
- count += 1
- self.assertEqual(ret, 0, "Glusterd is not running on some of the "
- "servers")
+ ret = wait_for_glusterd_to_start(self.servers)
+ self.assertTrue(ret, "Glusterd is not running on some of the "
+ "servers")
g.log.info("Glusterd is running on all servers %s", self.servers)