-rw-r--r--  tests/functional/glusterd/test_brick_status_when_quorum_not_met.py | 2
-rw-r--r--  tests/functional/glusterd/test_bricks_online_after_node_reboot.py | 4
-rw-r--r--  tests/functional/glusterd/test_create_vol_with_used_bricks.py | 2
-rw-r--r--  tests/functional/glusterd/test_glusterd_snap_info_on_detached_node.py | 10
-rw-r--r--  tests/functional/glusterd/test_lower_gluster_op_version.py | 6
-rw-r--r--  tests/functional/glusterd/test_op_version.py | 5
-rw-r--r--  tests/functional/glusterd/test_rebalance_when_quorum_not_met.py | 21
-rw-r--r--  tests/functional/glusterd/test_remove_brick_after_restart_glusterd.py | 14
-rw-r--r--  tests/functional/glusterd/test_replace_brick_quorum_not_met.py | 4
-rw-r--r--  tests/functional/glusterd/test_setting_volume_option_when_one_node_is_down_in_cluster.py | 34
-rw-r--r--  tests/functional/glusterd/test_setting_volume_option_with_more_than_4096_characters.py | 33
-rw-r--r--  tests/functional/glusterd/test_volume_set_with_quorum_enabled.py | 15
12 files changed, 57 insertions, 93 deletions
diff --git a/tests/functional/glusterd/test_brick_status_when_quorum_not_met.py b/tests/functional/glusterd/test_brick_status_when_quorum_not_met.py
index d08d8c872..9ea9b1ee4 100644
--- a/tests/functional/glusterd/test_brick_status_when_quorum_not_met.py
+++ b/tests/functional/glusterd/test_brick_status_when_quorum_not_met.py
@@ -136,7 +136,7 @@ class TestBrickStatusWhenQuorumNotMet(GlusterBaseClass):
# immediately after glusterd start, that's why verifying that all
# glusterd started nodes available in gluster volume status or not
count = 0
- while count < 80:
+ while count < 120:
vol_status = get_volume_status(self.mnode, self.volname)
servers_count = len(vol_status[self.volname].keys())
if servers_count == 5:
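
Note on this and the similar bumps below (60 to 80 in the next file, 100 to 120 in test_replace_brick_quorum_not_met.py): each loop polls once per iteration with a sleep between attempts that is elided from the context shown. A minimal sketch of the pattern, assuming the 2-second sleep these tests conventionally use; the helper name wait_for_server_count is illustrative, not part of the patch:

    from time import sleep
    from glustolibs.gluster.volume_ops import get_volume_status

    def wait_for_server_count(mnode, volname, expected, attempts=120):
        # Poll volume status until `expected` servers report in;
        # 120 attempts at ~2s apiece gives a ~240s worst-case wait.
        for _ in range(attempts):
            vol_status = get_volume_status(mnode, volname)
            if vol_status and len(vol_status[volname].keys()) == expected:
                return True
            sleep(2)
        return False
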
diff --git a/tests/functional/glusterd/test_bricks_online_after_node_reboot.py b/tests/functional/glusterd/test_bricks_online_after_node_reboot.py
index b91e25c55..87f6301a3 100644
--- a/tests/functional/glusterd/test_bricks_online_after_node_reboot.py
+++ b/tests/functional/glusterd/test_bricks_online_after_node_reboot.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2019 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2019-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -73,7 +73,7 @@ class BricksOnlineAfterNodeReboot(GlusterBaseClass):
def check_node_after_reboot(self, server):
count = 0
- while count < 60:
+ while count < 80:
ret = is_glusterd_running(server)
if not ret:
ret = self.validate_peers_are_connected()
diff --git a/tests/functional/glusterd/test_create_vol_with_used_bricks.py b/tests/functional/glusterd/test_create_vol_with_used_bricks.py
index 7c48b920f..2c18bcb6c 100644
--- a/tests/functional/glusterd/test_create_vol_with_used_bricks.py
+++ b/tests/functional/glusterd/test_create_vol_with_used_bricks.py
@@ -19,6 +19,7 @@
"""
import sys
+from time import sleep
from glusto.core import Glusto as g
@@ -149,6 +150,7 @@ class TestCreateVolWithUsedBricks(GlusterBaseClass):
self.assertEqual(ret, 0, "Volume %s is not unmounted" % (
self.volname))
g.log.info("Volume unmounted successfully : %s", self.volname)
+ sleep(2)
ret = rmdir(mount_obj.client_system, mount_obj.mountpoint)
self.assertTrue(ret, "Failed to remove directory mount directory.")
g.log.info("Mount directory is removed successfully")
diff --git a/tests/functional/glusterd/test_glusterd_snap_info_on_detached_node.py b/tests/functional/glusterd/test_glusterd_snap_info_on_detached_node.py
index 95aa68739..7eeb5f93d 100644
--- a/tests/functional/glusterd/test_glusterd_snap_info_on_detached_node.py
+++ b/tests/functional/glusterd/test_glusterd_snap_info_on_detached_node.py
@@ -17,6 +17,7 @@ import random
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.glusterfile import file_exists
from glustolibs.gluster.lib_utils import form_bricks_list
from glustolibs.gluster.volume_ops import (volume_create,
set_volume_options, volume_start)
@@ -105,12 +106,10 @@ class TestSnapInfoOnPeerDetachedNode(GlusterBaseClass):
# Validate files /var/lib/glusterd/snaps on all the servers is same
self.pathname = "/var/lib/glusterd/snaps/%s" % self.snapname
for server in self.servers:
- conn = g.rpyc_get_connection(server)
- ret = conn.modules.os.path.isdir(self.pathname)
+ ret = file_exists(server, self.pathname)
self.assertTrue(ret, "%s directory doesn't exist on node %s" %
(self.pathname, server))
g.log.info("%s path exists on node %s", self.pathname, server)
- g.rpyc_close_deployed_servers()
# Peer detach one node
self.random_node_peer_detach = random.choice(self.servers[1:])
@@ -121,11 +120,10 @@ class TestSnapInfoOnPeerDetachedNode(GlusterBaseClass):
g.log.info("Peer detach succeeded")
# /var/lib/glusterd/snaps/<snapname> directory should not present
- conn = g.rpyc_get_connection(self.random_node_peer_detach)
- ret = conn.modules.os.path.isdir(self.pathname)
+
+ ret = file_exists(self.random_node_peer_detach, self.pathname)
self.assertFalse(ret, "%s directory should not exist on the peer"
"which is detached from cluster%s" % (
self.pathname, self.random_node_peer_detach))
g.log.info("Expected: %s path doesn't exist on peer detached node %s",
self.pathname, self.random_node_peer_detach)
- g.rpyc_close_deployed_servers()
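
This hunk (and the matching one in test_op_version.py below) drops the rpyc channel, conn.modules.os.path.isdir plus the easy-to-forget g.rpyc_close_deployed_servers(), in favour of the file_exists helper, which checks the path over the ordinary remote-command channel and needs no cleanup. Its behaviour is presumably equivalent to a remote stat; a rough sketch of that idea (the exact implementation in glustolibs.gluster.glusterfile may differ):

    from glusto.core import Glusto as g

    def file_exists_sketch(host, path):
        # Run stat on the remote host; a zero exit code means the path
        # exists (file or directory, so it covers the old isdir checks).
        ret, _, _ = g.run(host, "stat %s" % path)
        return ret == 0
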
diff --git a/tests/functional/glusterd/test_lower_gluster_op_version.py b/tests/functional/glusterd/test_lower_gluster_op_version.py
index e3e7bc245..5efc5d7b0 100644
--- a/tests/functional/glusterd/test_lower_gluster_op_version.py
+++ b/tests/functional/glusterd/test_lower_gluster_op_version.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -22,8 +22,8 @@ from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.volume_libs import cleanup_volume
-from glustolibs.gluster.volume_libs import (get_volume_options,
- set_volume_options)
+from glustolibs.gluster.volume_ops import (get_volume_options,
+ set_volume_options)
@runs_on([['replicated'], ['glusterfs']])
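
The only change here besides the copyright year is the import path: get_volume_options and set_volume_options live in glustolibs.gluster.volume_ops, not volume_libs, so the old import would presumably fail with an ImportError. Usage is unchanged; a hedged example of the call shapes, with the argument order taken from test_op_version.py below and placeholder values:

    from glustolibs.gluster.volume_ops import (get_volume_options,
                                               set_volume_options)

    mnode = 'server0'  # placeholder management node
    opts = get_volume_options(mnode, 'all', 'cluster.op-version')
    ret = set_volume_options(mnode, 'all', {'cluster.op-version': '31305'})
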
diff --git a/tests/functional/glusterd/test_op_version.py b/tests/functional/glusterd/test_op_version.py
index 7e6d99466..95735bec9 100644
--- a/tests/functional/glusterd/test_op_version.py
+++ b/tests/functional/glusterd/test_op_version.py
@@ -22,6 +22,7 @@
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.glusterfile import file_exists
from glustolibs.gluster.volume_ops import (get_volume_options,
set_volume_options)
@@ -81,11 +82,9 @@ class TestMaxSupportedOpVersion(GlusterBaseClass):
# Checking vol file exist in all servers or not
file_path = '/var/lib/glusterd/vols/' + self.volname + '/info'
for server in self.servers:
- conn = g.rpyc_get_connection(server)
- ret = conn.modules.os.path.isfile(file_path)
+ ret = file_exists(server, file_path)
self.assertTrue(ret, "Vol file not found in server %s" % server)
g.log.info("vol file found in server %s", server)
- g.rpyc_close_deployed_servers()
# Getting version number from vol info file
# cmd: grepping version from vol info file
diff --git a/tests/functional/glusterd/test_rebalance_when_quorum_not_met.py b/tests/functional/glusterd/test_rebalance_when_quorum_not_met.py
index a96e1d2b6..dcc49936b 100644
--- a/tests/functional/glusterd/test_rebalance_when_quorum_not_met.py
+++ b/tests/functional/glusterd/test_rebalance_when_quorum_not_met.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2018-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -17,17 +17,17 @@
""" Description:
Test rebalance operation when quorum not met
"""
-from time import sleep
import random
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
-from glustolibs.gluster.volume_ops import set_volume_options
from glustolibs.gluster.gluster_init import (stop_glusterd, start_glusterd,
is_glusterd_running)
+from glustolibs.gluster.peer_ops import wait_for_peers_to_connect
from glustolibs.gluster.rebalance_ops import rebalance_start
from glustolibs.gluster.volume_ops import (volume_status,
- volume_stop, volume_start)
+ volume_stop, volume_start,
+ set_volume_options)
@runs_on([['distributed', 'dispersed', 'distributed-dispersed'],
@@ -58,16 +58,9 @@ class TestServerQuorumNotMet(GlusterBaseClass):
% self.random_server)
# checking for peer status from every node
- count = 0
- while count < 80:
- ret = self.validate_peers_are_connected()
- if ret:
- break
- sleep(2)
- count += 1
-
- if not ret:
- raise ExecutionError("Servers are not in peer probed state")
+ ret = wait_for_peers_to_connect(self.mnode, self.servers)
+ self.assertTrue(ret, "glusterd is not connected %s with peer %s"
+ % (self.mnode, self.servers))
# stopping the volume and Cleaning up the volume
ret = self.cleanup_volume()
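
This is the first of several hunks in the series that replace a hand-rolled count/sleep loop with peer_ops.wait_for_peers_to_connect; note the check now runs from mnode rather than from every node, so the "from every node" comment above is slightly stale. An approximate equivalent of what the deleted loop did, built on the library's is_peer_connected (the timeout value is assumed; the real helper's default may differ):

    from time import sleep
    from glustolibs.gluster.peer_ops import is_peer_connected

    def wait_for_peers_sketch(mnode, servers, wait_timeout=160):
        # Poll peer status from mnode until every server in `servers`
        # reports connected, or the timeout elapses.
        elapsed = 0
        while elapsed < wait_timeout:
            if is_peer_connected(mnode, servers):
                return True
            sleep(2)
            elapsed += 2
        return False
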
diff --git a/tests/functional/glusterd/test_remove_brick_after_restart_glusterd.py b/tests/functional/glusterd/test_remove_brick_after_restart_glusterd.py
index 4dd2d3d13..1cca5bc88 100644
--- a/tests/functional/glusterd/test_remove_brick_after_restart_glusterd.py
+++ b/tests/functional/glusterd/test_remove_brick_after_restart_glusterd.py
@@ -29,9 +29,7 @@ from glustolibs.gluster.lib_utils import form_bricks_list
from glustolibs.gluster.brick_ops import remove_brick
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.mount_ops import mount_volume, umount_volume
-from glustolibs.io.utils import (
- wait_for_io_to_complete,
- validate_io_procs)
+from glustolibs.io.utils import validate_io_procs, wait_for_io_to_complete
from glustolibs.gluster.glusterdir import rmdir
from glustolibs.gluster.gluster_init import restart_glusterd
@@ -173,15 +171,13 @@ class TestRemoveBrickAfterRestartGlusterd(GlusterBaseClass):
self.all_mounts_procs.append(proc)
self.io_validation_complete = False
- # wait for io to complete
- self.assertTrue(
- wait_for_io_to_complete(self.all_mounts_procs, self.mounts),
- "Io failed to complete on some of the clients")
-
# Validate IO
ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.io_validation_complete = True
self.assertTrue(ret, "IO failed on some of the clients")
+ if ret:
+ wait_for_io_to_complete(self.all_mounts_procs, self.mounts)
+ g.log.info("wait for io completed")
+ self.io_validation_complete = True
remove_brick_list = bricks_list[2:4]
ret, _, _ = remove_brick(self.mnode, self.volname, remove_brick_list,
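
The reordering makes validate_io_procs, which itself blocks until each client process exits, the authoritative check, instead of asserting on wait_for_io_to_complete before any validation has run. wait_for_io_to_complete is retained only to flip io_validation_complete, the flag that tearDown presumably consults so it does not re-wait on already-collected procs. A hypothetical tearDown guard of the usual glusto-tests shape (not part of this hunk):

    def tearDown(self):
        if not self.io_validation_complete:
            ret = wait_for_io_to_complete(self.all_mounts_procs, self.mounts)
            if not ret:
                raise ExecutionError("IO failed on some of the clients")
        self.get_super_method(self, 'tearDown')()
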
diff --git a/tests/functional/glusterd/test_replace_brick_quorum_not_met.py b/tests/functional/glusterd/test_replace_brick_quorum_not_met.py
index a6dc531d5..f89d963a5 100644
--- a/tests/functional/glusterd/test_replace_brick_quorum_not_met.py
+++ b/tests/functional/glusterd/test_replace_brick_quorum_not_met.py
@@ -156,7 +156,7 @@ class TestReplaceBrickWhenQuorumNotMet(GlusterBaseClass):
# on one of the server, Its not possible to check the brick status
# immediately in volume status after glusterd stop
count = 0
- while count < 100:
+ while count < 120:
vol_status = get_volume_status(self.mnode, self.volname)
servers_count = len(vol_status[self.volname].keys())
if servers_count == 5:
@@ -204,7 +204,7 @@ class TestReplaceBrickWhenQuorumNotMet(GlusterBaseClass):
# on one of the servers, Its not possible to check the brick status
# immediately in volume status after glusterd start
count = 0
- while count < 100:
+ while count < 120:
vol_status = get_volume_status(self.mnode, self.volname)
servers_count = len(vol_status[self.volname].keys())
if servers_count == 6:
diff --git a/tests/functional/glusterd/test_setting_volume_option_when_one_node_is_down_in_cluster.py b/tests/functional/glusterd/test_setting_volume_option_when_one_node_is_down_in_cluster.py
index abcdf30cf..c1f11f3a2 100644
--- a/tests/functional/glusterd/test_setting_volume_option_when_one_node_is_down_in_cluster.py
+++ b/tests/functional/glusterd/test_setting_volume_option_when_one_node_is_down_in_cluster.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -22,8 +22,10 @@ from time import sleep
from glusto.core import Glusto as g
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.exceptions import ExecutionError
-from glustolibs.gluster.volume_ops import set_volume_options, get_volume_info
-from glustolibs.gluster.gluster_init import start_glusterd, is_glusterd_running
+from glustolibs.gluster.volume_ops import (
+ set_volume_options, get_volume_info)
+from glustolibs.gluster.gluster_init import (
+ start_glusterd, wait_for_glusterd_to_start)
from glustolibs.gluster.volume_libs import setup_volume
from glustolibs.gluster.peer_ops import (peer_probe_servers,
peer_detach_servers,
@@ -58,7 +60,7 @@ class VolumeInfoSync(GlusterBaseClass):
raise ExecutionError("Failed to probe detached "
"servers %s" % self.servers)
- # stopping the volume and Cleaning up the volume
+ # stopping the volume and Cleaning up the volume
ret = self.cleanup_volume()
if not ret:
raise ExecutionError("Failed to Cleanup the Volume %s"
@@ -104,15 +106,9 @@ class VolumeInfoSync(GlusterBaseClass):
ret = start_glusterd(random_server)
self.assertTrue(ret, "Failed to start glusterd on %s" % random_server)
- count = 0
- while count < 60:
- ret = is_glusterd_running(random_server)
- if not ret:
- break
- sleep(2)
- count += 1
- self.assertEqual(ret, 0, "glusterd is not running on %s"
- % random_server)
+ ret = wait_for_glusterd_to_start(random_server)
+ self.assertTrue(ret, "glusterd is not running on %s"
+ % random_server)
g.log.info("glusterd is started and running on %s", random_server)
# volume info should be synced across the cluster
@@ -152,15 +148,9 @@ class VolumeInfoSync(GlusterBaseClass):
ret = start_glusterd(random_server)
self.assertTrue(ret, "Failed to start glusterd on %s" % random_server)
- count = 0
- while count < 60:
- ret = is_glusterd_running(random_server)
- if not ret:
- break
- sleep(2)
- count += 1
- self.assertEqual(ret, 0, "glusterd is not running on %s"
- % random_server)
+ ret = wait_for_glusterd_to_start(random_server)
+ self.assertTrue(ret, "glusterd is not running on %s"
+ % random_server)
g.log.info("glusterd is started and running on %s", random_server)
# peer status should be synced across the cluster
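
Both hunks in this file make the same substitution: the 60-iteration is_glusterd_running loop becomes a single call to wait_for_glusterd_to_start, and the assertEqual(ret, 0) becomes assertTrue since the helper returns a boolean. A sketch of the behaviour that call replaces, using the 0-means-running convention visible in the deleted code (the real helper's signature and default timeout may differ):

    from time import sleep
    from glustolibs.gluster.gluster_init import is_glusterd_running

    def wait_for_glusterd_sketch(servers, timeout=120):
        # is_glusterd_running returns 0 once glusterd is up on the
        # given servers; poll until then or until the timeout elapses.
        elapsed = 0
        while elapsed < timeout:
            if is_glusterd_running(servers) == 0:
                return True
            sleep(2)
            elapsed += 2
        return False
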
diff --git a/tests/functional/glusterd/test_setting_volume_option_with_more_than_4096_characters.py b/tests/functional/glusterd/test_setting_volume_option_with_more_than_4096_characters.py
index 9a7f764d5..2c8479542 100644
--- a/tests/functional/glusterd/test_setting_volume_option_with_more_than_4096_characters.py
+++ b/tests/functional/glusterd/test_setting_volume_option_with_more_than_4096_characters.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -14,14 +14,14 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-from time import sleep
from glusto.core import Glusto as g
from glustolibs.gluster.gluster_base_class import runs_on, GlusterBaseClass
from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.peer_ops import wait_for_peers_to_connect
from glustolibs.gluster.volume_libs import setup_volume
from glustolibs.gluster.volume_ops import set_volume_options
from glustolibs.gluster.gluster_init import (restart_glusterd,
- is_glusterd_running)
+ wait_for_glusterd_to_start)
@runs_on([['distributed'], ['glusterfs']])
@@ -37,16 +37,9 @@ class TestVolumeOptionSetWithMaxcharacters(GlusterBaseClass):
def tearDown(self):
- count = 0
- while count < 60:
- ret = self.validate_peers_are_connected()
- if ret:
- break
- sleep(2)
- count += 1
-
- if not ret:
- raise ExecutionError("Peers are not in connected state")
+ ret = wait_for_peers_to_connect(self.mnode, self.servers)
+ self.assertTrue(ret, "glusterd is not connected %s with peer %s"
+ % (self.mnode, self.servers))
# stopping the volume and Cleaning up the volume
ret = self.cleanup_volume()
@@ -91,11 +84,9 @@ class TestVolumeOptionSetWithMaxcharacters(GlusterBaseClass):
ret = restart_glusterd(self.mnode)
self.assertTrue(ret, "Failed to restart the glusterd on %s"
% self.mnode)
- count = 0
- while count < 60:
- ret = is_glusterd_running(self.mnode)
- if not ret:
- break
- sleep(2)
- count += 1
- self.assertEqual(ret, 0, "glusterd is not running on %s" % self.mnode)
+
+ ret = wait_for_glusterd_to_start(self.servers)
+ self.assertTrue(ret, "glusterd is not running on %s"
+ % self.servers)
+ g.log.info("Glusterd start on the nodes : %s "
+ "succeeded", self.servers)
diff --git a/tests/functional/glusterd/test_volume_set_with_quorum_enabled.py b/tests/functional/glusterd/test_volume_set_with_quorum_enabled.py
index 68078387a..23598f007 100644
--- a/tests/functional/glusterd/test_volume_set_with_quorum_enabled.py
+++ b/tests/functional/glusterd/test_volume_set_with_quorum_enabled.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -18,6 +18,7 @@ from time import sleep
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.peer_ops import wait_for_peers_to_connect
from glustolibs.gluster.volume_ops import set_volume_options
from glustolibs.gluster.gluster_init import start_glusterd, stop_glusterd
@@ -50,15 +51,9 @@ class TestVolumeSetOpWithQuorum(GlusterBaseClass):
g.log.info("Successfully started glusterd.")
# Checking if peer is connected.
- counter = 0
- while counter < 30:
- ret = self.validate_peers_are_connected()
- counter += 1
- if ret:
- break
- sleep(3)
- if not ret:
- raise ExecutionError("Peer is not in connected state.")
+ ret = wait_for_peers_to_connect(self.mnode, self.servers)
+ self.assertTrue(ret, "glusterd is not connected %s with peer %s"
+ % (self.mnode, self.servers))
g.log.info("Peers is in connected state.")
# Setting Quorum ratio to 51%