diff options
author | Sri Vignesh <sselvan@redhat.com> | 2020-01-06 14:11:48 +0530 |
---|---|---|
committer | Bala Konda Reddy M <bmekala@redhat.com> | 2020-01-21 09:43:29 +0000 |
commit | 674277c7c6e66f00bf15b50d223b7b2ec0b8675c (patch) | |
tree | 467175be2af08bbb18c3bdafabd02b7bfdee3f69 | |
parent | 1a6c15c9d2721b94ebfa05e399dd3b7719d4843d (diff) |
[libfix][testfix] Add waiter function for glusterd and peer connected library files
Move the waiter loops out of the test case and add them as library functions in gluster_init and peer_ops.
Change-Id: I5ab1e42a5a0366fadb399789da1c156d8d96ec18
Signed-off-by: Sri Vignesh <sselvan@redhat.com>
3 files changed, 74 insertions, 36 deletions
def wait_for_glusterd_to_start(servers, glusterd_start_wait_timeout=80):
    """Wait until glusterd reports as running on all given nodes.

    Polls is_glusterd_running() once per second until it reports
    success or the timeout elapses.

    Args:
        servers (str|list): a single host or a list of hosts on which
            glusterd status has to be checked.
        glusterd_start_wait_timeout (int): number of one-second retries
            before giving up. Defaults to 80.

    Returns:
        bool: True if glusterd is running on all servers within the
            timeout, False otherwise.
    """
    nodes = servers if isinstance(servers, list) else [servers]
    for _ in range(glusterd_start_wait_timeout + 1):
        # is_glusterd_running() returns a falsy value when glusterd
        # is up on every node polled.
        if not is_glusterd_running(nodes):
            g.log.info("glusterd is running on %s", nodes)
            return True
        sleep(1)
    g.log.error("glusterd is not running on %s", nodes)
    return False
def wait_for_peers_to_connect(mnode, servers, wait_timeout=30):
    """Wait until all given servers are in peer-connected state.

    Polls is_peer_connected() once per second until every server is in
    'Peer in Cluster' / 'Connected' state or the timeout elapses.

    Args:
        mnode (str): node on which the peer status command is executed.
        servers (str|list): a single host or a list of hosts whose peer
            status has to be checked.
        wait_timeout (int): number of one-second retries before giving
            up. Defaults to 30.

    Returns:
        bool: True if all peers reached connected state within the
            timeout, False otherwise.
    """
    # Bug fix: the original tested `not isinstance(servers, str)`,
    # which wrapped a list into a nested list and left a bare string
    # unwrapped -- the opposite of the documented str|list contract
    # (and of the companion wait_for_glusterd_to_start helper).
    if not isinstance(servers, list):
        servers = [servers]

    count = 0
    while count <= wait_timeout:
        ret = is_peer_connected(mnode, servers)
        if ret:
            g.log.info("peers in connected state: %s", servers)
            return True
        sleep(1)
        count += 1
    g.log.error("Peers are not in connected state: %s", servers)
    return False
<http://www.redhat.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -14,7 +14,6 @@ # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -from time import sleep from glusto.core import Glusto as g from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on from glustolibs.gluster.volume_ops import (volume_create, volume_start, @@ -23,14 +22,14 @@ from glustolibs.gluster.volume_ops import (volume_create, volume_start, from glustolibs.gluster.brick_libs import get_all_bricks from glustolibs.gluster.volume_libs import (cleanup_volume) from glustolibs.gluster.peer_ops import (peer_probe, peer_detach, - is_peer_connected, peer_probe_servers, peer_detach_servers, - nodes_from_pool_list) + nodes_from_pool_list, + wait_for_peers_to_connect) from glustolibs.gluster.lib_utils import form_bricks_list from glustolibs.gluster.exceptions import ExecutionError from glustolibs.gluster.gluster_init import (start_glusterd, stop_glusterd, - is_glusterd_running) + wait_for_glusterd_to_start) @runs_on([['distributed'], ['glusterfs']]) @@ -131,14 +130,8 @@ class TestBrickPortAfterModifyVolume(GlusterBaseClass): ret = stop_glusterd(self.servers[1]) self.assertTrue(ret, "Failed to stop glusterd on one of the node") - count = 0 - while count < 60: - ret = is_glusterd_running(self.servers[1]) - if ret: - break - sleep(2) - count += 1 - self.assertEqual(ret, 1, "glusterd is still running on %s" + ret = wait_for_glusterd_to_start(self.servers[1]) + self.assertFalse(ret, "glusterd is still running on %s" % self.servers[1]) g.log.info("Glusterd stop on the nodes : %s " "succeeded", self.servers[1]) @@ -155,28 +148,15 @@ class TestBrickPortAfterModifyVolume(GlusterBaseClass): self.assertTrue(ret, "Failed to start glusterd on one of the node") g.log.info("Glusterd start on the nodes : %s " 
"succeeded", self.servers[1]) - count = 0 - while count < 60: - ret = is_glusterd_running(self.servers[1]) - if not ret: - break - sleep(2) - count += 1 - - self.assertEqual(ret, 0, "glusterd is not running on %s" - % self.servers[1]) + ret = wait_for_glusterd_to_start(self.servers[1]) + self.assertTrue(ret, "glusterd is not running on %s" + % self.servers[1]) g.log.info("Glusterd start on the nodes : %s " "succeeded", self.servers[1]) - count = 0 - while count < 60: - ret = is_peer_connected(self.servers[0], self.servers[1]) - if ret: - break - sleep(2) - count += 1 - self.assertEqual(ret, 1, "glusterd is not connected %s with peer %s" - % (self.servers[0], self.servers[1])) + ret = wait_for_peers_to_connect(self.servers[0], self.servers[1]) + self.assertTrue(ret, "glusterd is not connected %s with peer %s" + % (self.servers[0], self.servers[1])) vol_status = get_volume_status(self.mnode, self.volname) self.assertIsNotNone(vol_status, "Failed to get volume " |