Diffstat (limited to 'tests/functional/glusterd')
-rw-r--r--  tests/functional/glusterd/test_brick_status_when_quorum_not_met.py | 9
-rw-r--r--  tests/functional/glusterd/test_glusterd_quorum.py | 31
-rw-r--r--  tests/functional/glusterd/test_remove_brick_after_restart_glusterd.py | 10
-rw-r--r--  tests/functional/glusterd/test_restart_glusterd_while_rebalancing.py | 27
-rw-r--r--  tests/functional/glusterd/test_self_heal_quota_daemon_after_reboot.py | 43
-rw-r--r--  tests/functional/glusterd/test_setting_volume_option_with_more_than_4096_characters.py | 12
6 files changed, 78 insertions, 54 deletions
diff --git a/tests/functional/glusterd/test_brick_status_when_quorum_not_met.py b/tests/functional/glusterd/test_brick_status_when_quorum_not_met.py
index 992972246..2b7e5a560 100644
--- a/tests/functional/glusterd/test_brick_status_when_quorum_not_met.py
+++ b/tests/functional/glusterd/test_brick_status_when_quorum_not_met.py
@@ -28,7 +28,8 @@ from glustolibs.gluster.gluster_init import (
from glustolibs.gluster.brick_libs import (are_bricks_offline,
get_all_bricks)
from glustolibs.gluster.volume_ops import get_volume_status
-from glustolibs.gluster.peer_ops import peer_probe_servers, is_peer_connected
+from glustolibs.gluster.peer_ops import (
+ peer_probe_servers, is_peer_connected, wait_for_peers_to_connect)
@runs_on([['distributed-replicated'], ['glusterfs']])
@@ -131,12 +132,16 @@ class TestBrickStatusWhenQuorumNotMet(GlusterBaseClass):
g.log.info("Glusterd started successfully on all servers except "
"last node %s", self.servers[1:5])
+ self.assertTrue(
+ wait_for_peers_to_connect(self.mnode, self.servers[1:5]),
+ "Peers are not in connected state")
+
# Verifying node count in volume status after glusterd is
# started on the servers. It's not possible to check the brick status
# immediately after glusterd starts, so instead verify that every node
# on which glusterd was started shows up in gluster volume status
count = 0
- while count < 200:
+ while count < 80:
vol_status = get_volume_status(self.mnode, self.volname)
servers_count = len(vol_status[self.volname].keys())
if servers_count == 5:
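The new wait_for_peers_to_connect call is presumably why the poll bound can drop from 200 to 80 iterations: the expensive part (waiting for the peers themselves) now happens up front. For reference, the poll-for-server-count idiom the loop uses, as a standalone sketch; wait_for_server_count, its defaults, and the fake status source are illustrative stand-ins, not glustolibs API:

from time import sleep

def wait_for_server_count(get_status, volname, expected,
                          attempts=80, interval=2):
    # Poll volume status until `expected` servers report in, or give up.
    # get_status stands in for glustolibs' get_volume_status(mnode, volname);
    # it should return {volname: {server: ...}} or None while unready.
    for _ in range(attempts):
        status = get_status(volname)
        if status and len(status[volname]) == expected:
            return True
        sleep(interval)
    return False

# Usage with a fake status source that is ready immediately:
assert wait_for_server_count(lambda v: {v: dict.fromkeys(range(5))},
                             "testvol", 5)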
diff --git a/tests/functional/glusterd/test_glusterd_quorum.py b/tests/functional/glusterd/test_glusterd_quorum.py
index 16691ce2f..9744370e8 100644
--- a/tests/functional/glusterd/test_glusterd_quorum.py
+++ b/tests/functional/glusterd/test_glusterd_quorum.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -60,37 +60,36 @@ class TestServerQuorum(GlusterBaseClass):
raise ExecutionError("Minimun four nodes required for this "
" testcase to execute")
- @classmethod
- def tearDownClass(cls):
+ def tearDown(self):
- # Setting quorum ratio to 51%
- ret = set_volume_options(cls.mnode, 'all',
- {'cluster.server-quorum-ratio': '51%'})
- if not ret:
- raise ExecutionError("Failed to set server quorum ratio on %s"
- % cls.volname)
-
- vol_list = get_volume_list(cls.mnode)
+ vol_list = get_volume_list(self.mnode)
if vol_list is None:
raise ExecutionError("Failed to get volume list")
for volume in vol_list:
- ret = cleanup_volume(cls.mnode, volume)
+ ret = cleanup_volume(self.mnode, volume)
if not ret:
raise ExecutionError("Failed Cleanup the volume")
g.log.info("Volume deleted successfully %s", volume)
+ # Setting quorum ratio to 51%
+ ret = set_volume_options(self.mnode, 'all',
+ {'cluster.server-quorum-ratio': '51%'})
+ if not ret:
+ raise ExecutionError("Failed to set server quorum ratio on %s"
+ % self.volname)
+
# Peer probe servers since we are doing peer detach in setUpClass
- for server in cls.servers:
- ret = is_peer_connected(server, cls.servers)
+ for server in self.servers:
+ ret = is_peer_connected(server, self.servers)
if not ret:
- ret = peer_probe_servers(server, cls.servers)
+ ret = peer_probe_servers(server, self.servers)
if not ret:
raise ExecutionError(
"Peer probe failed to one of the node")
g.log.info("Peer probe successful")
- cls.get_super_method(cls, 'tearDownClass')()
+ self.get_super_method(self, 'tearDown')()
@pytest.mark.test_glusterd_quorum_validation
def test_glusterd_quorum_validation(self):
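The probe-on-demand block in this tearDown is worth reading as its own pattern: only servers that have dropped out of the trusted pool get re-probed. A minimal sketch of that pattern, reusing the glustolibs helpers the file already imports; the ensure_peers_connected wrapper itself is hypothetical:

from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.peer_ops import is_peer_connected, peer_probe_servers

def ensure_peers_connected(servers):
    # Re-probe from any server that does not see the full trusted pool.
    for server in servers:
        if is_peer_connected(server, servers):
            continue
        if not peer_probe_servers(server, servers):
            raise ExecutionError("Peer probe failed on one of the nodes")
    g.log.info("Peer probe successful")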
diff --git a/tests/functional/glusterd/test_remove_brick_after_restart_glusterd.py b/tests/functional/glusterd/test_remove_brick_after_restart_glusterd.py
index 90e962b39..40012b009 100644
--- a/tests/functional/glusterd/test_remove_brick_after_restart_glusterd.py
+++ b/tests/functional/glusterd/test_remove_brick_after_restart_glusterd.py
@@ -29,7 +29,7 @@ from glustolibs.gluster.lib_utils import form_bricks_list
from glustolibs.gluster.brick_ops import remove_brick
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.mount_ops import mount_volume, umount_volume
-from glustolibs.io.utils import validate_io_procs, wait_for_io_to_complete
+from glustolibs.io.utils import validate_io_procs
from glustolibs.gluster.gluster_init import restart_glusterd
@@ -166,11 +166,9 @@ class TestRemoveBrickAfterRestartGlusterd(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- if ret:
- wait_for_io_to_complete(self.all_mounts_procs, self.mounts)
- g.log.info("wait for io completed")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients")
self.io_validation_complete = True
remove_brick_list = bricks_list[2:4]
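Dropping wait_for_io_to_complete rests on the assumption that validate_io_procs already waits on each IO process before reporting success, making a second wait redundant. The same join-then-judge contract with plain subprocess, purely as a local illustration:

import subprocess

def validate_procs(procs):
    # Wait for every worker, succeed only if all exited cleanly --
    # the same contract assumed of glustolibs' validate_io_procs.
    return all(proc.wait() == 0 for proc in procs)

procs = [subprocess.Popen(["python3", "-c", "pass"]) for _ in range(3)]
assert validate_procs(procs), "IO failed on some of the clients"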
diff --git a/tests/functional/glusterd/test_restart_glusterd_while_rebalancing.py b/tests/functional/glusterd/test_restart_glusterd_while_rebalancing.py
index 6b6256872..ace2f2fb0 100644
--- a/tests/functional/glusterd/test_restart_glusterd_while_rebalancing.py
+++ b/tests/functional/glusterd/test_restart_glusterd_while_rebalancing.py
@@ -20,7 +20,6 @@
import sys
-from time import sleep
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
@@ -30,7 +29,10 @@ from glustolibs.gluster.brick_ops import add_brick
from glustolibs.gluster.rebalance_ops import (rebalance_start,
get_rebalance_status)
from glustolibs.gluster.gluster_init import (restart_glusterd,
- wait_for_glusterd_to_start)
+ wait_for_glusterd_to_start,
+ is_glusterd_running,
+ start_glusterd)
+from glustolibs.gluster.peer_ops import wait_for_peers_to_connect
from glustolibs.io.utils import validate_io_procs
from glustolibs.misc.misc_libs import upload_scripts
from glustolibs.gluster.glusterdir import get_dir_contents
@@ -90,18 +92,19 @@ class TestRestartGlusterdWhileRebalance(GlusterBaseClass):
"""
tearDown for every test
"""
+ ret = is_glusterd_running(self.servers)
+ if ret:
+ ret = start_glusterd(self.servers)
+ if not ret:
+ raise ExecutionError("Failed to start glusterd on %s"
+ % self.servers)
+ g.log.info("Glusterd started successfully on %s", self.servers)
# Checking peer status from every node
- count = 0
- while count < 80:
- ret = self.validate_peers_are_connected()
- if ret:
- break
- sleep(2)
- count += 1
-
- if not ret:
- raise ExecutionError("Servers are not in peer probed state")
+ for server in self.servers:
+ ret = wait_for_peers_to_connect(server, self.servers)
+ if not ret:
+ raise ExecutionError("Servers are not in peer probed state")
# Unmounting the volume and cleaning up the volume
ret = self.unmount_volume_and_cleanup_volume(self.mounts)
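This tearDown and the ones below share a theme: hand-rolled count/sleep loops give way to the library's wait_for_* helpers, and cleanup first restarts glusterd wherever it is down (note the call-site convention that a truthy is_glusterd_running() return means glusterd is not running on at least one node). The generic shape such wait helpers encapsulate, sketched here; wait_until is illustrative, not a glustolibs API:

from time import sleep

def wait_until(predicate, attempts=80, interval=2):
    # Retry `predicate` until it holds or the attempts run out -- the
    # idiom wait_for_peers_to_connect and wait_for_glusterd_to_start
    # encapsulate behind their specific checks.
    for _ in range(attempts):
        if predicate():
            return True
        sleep(interval)
    return False

assert wait_until(lambda: True)                           # first try
assert not wait_until(lambda: False, attempts=2, interval=0)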
diff --git a/tests/functional/glusterd/test_self_heal_quota_daemon_after_reboot.py b/tests/functional/glusterd/test_self_heal_quota_daemon_after_reboot.py
index 3f8248be4..f1a523ce6 100644
--- a/tests/functional/glusterd/test_self_heal_quota_daemon_after_reboot.py
+++ b/tests/functional/glusterd/test_self_heal_quota_daemon_after_reboot.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2019 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -19,14 +19,16 @@
Test cases in this module relate to self-heal
daemon and quota daemon status after reboot.
"""
-from time import sleep
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.quota_ops import quota_enable, quota_limit_usage
from glustolibs.gluster.volume_ops import get_volume_status
from glustolibs.misc.misc_libs import reboot_nodes_and_wait_to_come_online
-from glustolibs.gluster.gluster_init import is_glusterd_running
+from glustolibs.gluster.gluster_init import (is_glusterd_running,
+ start_glusterd,
+ wait_for_glusterd_to_start)
+from glustolibs.gluster.peer_ops import wait_for_peers_to_connect
@runs_on([['replicated', 'distributed-replicated'], ['glusterfs']])
@@ -47,6 +49,19 @@ class TestSelfHealDeamonQuotaDeamonAfterReboot(GlusterBaseClass):
g.log.info("Volume created successfully : %s", self.volname)
def tearDown(self):
+ ret = is_glusterd_running(self.servers)
+ if ret:
+ ret = start_glusterd(self.servers)
+ if not ret:
+ raise ExecutionError("Failed to start glusterd on %s"
+ % self.servers)
+ g.log.info("Glusterd started successfully on %s", self.servers)
+
+ # Checking peer status from every node
+ ret = wait_for_peers_to_connect(self.mnode, self.servers)
+ if not ret:
+ raise ExecutionError("Peers are not in connected state")
+
# Stopping the volume and cleaning up the volume
ret = self.unmount_volume_and_cleanup_volume(self.mounts)
if not ret:
@@ -144,20 +159,14 @@ class TestSelfHealDeamonQuotaDeamonAfterReboot(GlusterBaseClass):
g.log.info("Node %s rebooted successfully", self.servers[1])
# Checking glusterd status and peer status after reboot of the server
- count = 0
- while count < 100:
- ret = is_glusterd_running(self.servers[1])
- if not ret:
- ret = self.validate_peers_are_connected()
- if ret:
- g.log.info("glusterd is running and all peers are in "
- "connected state")
- break
- count += 1
- sleep(5)
- self.assertEqual(count, 100,
- "Either glusterd is not runnig or peers are "
- "not in connected state ")
+ self.assertTrue(
+ wait_for_glusterd_to_start(self.servers[1]),
+ "Failed to start glusterd on %s" % self.servers[1])
+ self.assertTrue(
+ wait_for_peers_to_connect(self.mnode, self.servers),
+ "some peers are not in connected state")
+ g.log.info("glusterd is running and all peers are in "
+ "connected state")
# Checks self heal daemon and quota daemon process running or not
ret = self.is_daemon_process_running()
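Besides being shorter, this rewrite fixes an inverted assertion: the old loop broke out early on success, leaving count below 100, yet the final assertEqual demanded count == 100, so the check failed on success and could only pass on timeout. Reduced to a runnable skeleton (check_ok is a hypothetical stand-in for the combined glusterd and peer check):

def check_ok():
    # Hypothetical stand-in for "glusterd running and peers connected";
    # always succeeds here so the skeleton terminates immediately.
    return True

count = 0
while count < 100:
    if check_ok():
        break
    count += 1

# Correct sense of the post-loop check: success means we broke out early.
assert count < 100, "either glusterd is not running or peers not connected"
# The replaced loop asserted count == 100 instead, which could only pass
# when every attempt had failed.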
diff --git a/tests/functional/glusterd/test_setting_volume_option_with_more_than_4096_characters.py b/tests/functional/glusterd/test_setting_volume_option_with_more_than_4096_characters.py
index 2c8479542..64b968bb4 100644
--- a/tests/functional/glusterd/test_setting_volume_option_with_more_than_4096_characters.py
+++ b/tests/functional/glusterd/test_setting_volume_option_with_more_than_4096_characters.py
@@ -21,7 +21,9 @@ from glustolibs.gluster.peer_ops import wait_for_peers_to_connect
from glustolibs.gluster.volume_libs import setup_volume
from glustolibs.gluster.volume_ops import set_volume_options
from glustolibs.gluster.gluster_init import (restart_glusterd,
- wait_for_glusterd_to_start)
+ wait_for_glusterd_to_start,
+ is_glusterd_running,
+ start_glusterd)
@runs_on([['distributed'], ['glusterfs']])
@@ -37,6 +39,14 @@ class TestVolumeOptionSetWithMaxcharacters(GlusterBaseClass):
def tearDown(self):
+ ret = is_glusterd_running(self.servers)
+ if ret:
+ ret = start_glusterd(self.servers)
+ if not ret:
+ raise ExecutionError("Failed to start glusterd on %s"
+ % self.servers)
+ g.log.info("Glusterd started successfully on %s", self.servers)
+
ret = wait_for_peers_to_connect(self.mnode, self.servers)
self.assertTrue(ret, "glusterd is not connected %s with peer %s"
% (self.servers, self.servers))