author     Sri Vignesh <sselvan@redhat.com>    2020-03-03 10:35:22 +0530
committer  Sri Vignesh <sselvan@redhat.com>    2020-03-03 09:51:09 +0000
commit     b9ab4a1c72b7841024facc753ae4ead8953857b0 (patch)
tree       d5b9e2f66f38ee25841b0619cbefb3b919f28c09
parent     7635a3ba91b5931ddf6a29b6b35498fbe8401c96 (diff)
[testfix] Add timeout to fix failures
Add extra time for beaker machines to validate the test cases. For test_rebalance_spurious.py, add cleanup in teardown because the fix-layout patch is not yet merged.

Change-Id: I7ee8324ff136bbdb74600b730b4b802d86116427
Signed-off-by: Sri Vignesh <sselvan@redhat.com>
-rw-r--r--  tests/functional/glusterd/test_brick_status_when_quorum_not_met.py |  2
-rw-r--r--  tests/functional/glusterd/test_rebalance_spurious.py               | 19
-rw-r--r--  tests/functional/glusterd/test_replace_brick_quorum_not_met.py     |  8
-rw-r--r--  tests/functional/glusterd/test_volume_delete.py                    | 13
4 files changed, 31 insertions(+), 11 deletions(-)
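Every timeout hunk in this patch touches the same bounded-polling pattern: a status check is retried until it succeeds or an iteration cap is hit, and the cap is what gets raised for the slower beaker machines. A minimal sketch of that pattern, assuming get_volume_status is importable from glustolibs.gluster.volume_ops as used in the hunks below, and assuming a 2-second sleep between attempts (the helper name, sleep interval, and return convention are illustrative, not copied from the patch):

    from time import sleep

    from glustolibs.gluster.volume_ops import get_volume_status


    def wait_for_server_count(mnode, volname, expected, attempts=200):
        # Poll volume status until the expected number of servers shows
        # up, or the attempt cap is exhausted.
        count = 0
        while count < attempts:
            vol_status = get_volume_status(mnode, volname)
            if vol_status and len(vol_status[volname].keys()) == expected:
                return True
            sleep(2)
            count += 1
        return False

Raising the cap from 80 or 120 to 200 widens the wait window without changing the loop's logic.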
diff --git a/tests/functional/glusterd/test_brick_status_when_quorum_not_met.py b/tests/functional/glusterd/test_brick_status_when_quorum_not_met.py
index 2b7e5a560..2679bebee 100644
--- a/tests/functional/glusterd/test_brick_status_when_quorum_not_met.py
+++ b/tests/functional/glusterd/test_brick_status_when_quorum_not_met.py
@@ -141,7 +141,7 @@ class TestBrickStatusWhenQuorumNotMet(GlusterBaseClass):
# immediately after glusterd start, so verify that all nodes where
# glusterd was started appear in gluster volume status
count = 0
- while count < 80:
+ while count < 200:
vol_status = get_volume_status(self.mnode, self.volname)
servers_count = len(vol_status[self.volname].keys())
if servers_count == 5:
diff --git a/tests/functional/glusterd/test_rebalance_spurious.py b/tests/functional/glusterd/test_rebalance_spurious.py
index dab69b7b2..fa9f22084 100644
--- a/tests/functional/glusterd/test_rebalance_spurious.py
+++ b/tests/functional/glusterd/test_rebalance_spurious.py
@@ -25,12 +25,13 @@ from glustolibs.gluster.peer_ops import (peer_probe, peer_detach,
peer_probe_servers,
nodes_from_pool_list,
is_peer_connected)
-from glustolibs.gluster.lib_utils import form_bricks_list
+from glustolibs.gluster.lib_utils import (
+ form_bricks_list, get_servers_bricks_dict)
from glustolibs.gluster.brick_ops import remove_brick
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.rebalance_ops import (rebalance_start,
wait_for_fix_layout_to_complete)
-from glustolibs.gluster.glusterdir import mkdir
+from glustolibs.gluster.glusterdir import mkdir, get_dir_contents
from glustolibs.gluster.mount_ops import mount_volume, umount_volume
from glustolibs.gluster.glusterfile import get_fattr
@@ -76,6 +77,20 @@ class TestSpuriousRebalance(GlusterBaseClass):
"servers %s" % self.servers)
g.log.info("Peer probe success for detached "
"servers %s", self.servers)
+
+ bricks = get_servers_bricks_dict(self.servers,
+ self.all_servers_info)
+
+ # Check each brick directory and clean it if it is not empty.
+ for server in self.servers:
+ for brick in bricks[server]:
+ if get_dir_contents(server, brick):
+ cmd = "rm -rf " + brick + "/*"
+ ret, _, _ = g.run(server, cmd)
+ if ret:
+ raise ExecutionError("Failed to delete the brick "
+ "dirs of deleted volume.")
+
self.get_super_method(self, 'tearDown')()
def test_spurious_rebalance(self):
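Read as a standalone helper, the cleanup added to tearDown above looks like the sketch below. It reuses only calls shown in this patch (g.run from glusto plus the two newly imported glustolibs helpers); the function name is hypothetical:

    from glusto.core import Glusto as g

    from glustolibs.gluster.exceptions import ExecutionError
    from glustolibs.gluster.glusterdir import get_dir_contents
    from glustolibs.gluster.lib_utils import get_servers_bricks_dict


    def clean_brick_dirs(servers, all_servers_info):
        # Map each server to its brick paths and wipe leftover contents.
        bricks = get_servers_bricks_dict(servers, all_servers_info)
        for server in servers:
            for brick in bricks[server]:
                if get_dir_contents(server, brick):
                    ret, _, _ = g.run(server, "rm -rf " + brick + "/*")
                    if ret:
                        raise ExecutionError("Failed to delete the brick "
                                             "dirs of deleted volume.")

Wiping the brick directories here compensates for the unmerged fix-layout patch mentioned in the commit message, so leftover brick contents cannot leak into the next test run.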
diff --git a/tests/functional/glusterd/test_replace_brick_quorum_not_met.py b/tests/functional/glusterd/test_replace_brick_quorum_not_met.py
index 00e1af9cc..70c79f0e8 100644
--- a/tests/functional/glusterd/test_replace_brick_quorum_not_met.py
+++ b/tests/functional/glusterd/test_replace_brick_quorum_not_met.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2018-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -147,7 +147,7 @@ class TestReplaceBrickWhenQuorumNotMet(GlusterBaseClass):
# on one of the servers, it's not possible to check the brick status
# immediately in volume status after glusterd stop
count = 0
- while count < 120:
+ while count < 200:
vol_status = get_volume_status(self.mnode, self.volname)
servers_count = len(vol_status[self.volname].keys())
if servers_count == 5:
@@ -195,7 +195,7 @@ class TestReplaceBrickWhenQuorumNotMet(GlusterBaseClass):
# on one of the servers, it's not possible to check the brick status
# immediately in volume status after glusterd start
count = 0
- while count < 120:
+ while count < 200:
vol_status = get_volume_status(self.mnode, self.volname)
servers_count = len(vol_status[self.volname].keys())
if servers_count == 6:
@@ -205,7 +205,7 @@ class TestReplaceBrickWhenQuorumNotMet(GlusterBaseClass):
# Checking bricks are online or not
count = 0
- while count < 100:
+ while count < 200:
ret = are_bricks_online(self.mnode, self.volname,
self.brick_list[0:6])
if ret:
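A design note on the three timeout hunks above: raising the iteration count bounds the number of polls, not wall-clock time. An alternative (not what this patch does) is a clock-based deadline. A minimal sketch using are_bricks_online from the hunk above, assuming it is importable from glustolibs.gluster.brick_libs and with an illustrative timeout value and helper name:

    from time import sleep, time

    from glustolibs.gluster.brick_libs import are_bricks_online


    def wait_for_bricks_online(mnode, volname, bricks, timeout=400):
        # Poll until the given bricks report online or the deadline
        # passes, whichever comes first.
        deadline = time() + timeout
        while time() < deadline:
            if are_bricks_online(mnode, volname, bricks):
                return True
            sleep(2)
        return False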
diff --git a/tests/functional/glusterd/test_volume_delete.py b/tests/functional/glusterd/test_volume_delete.py
index 89435e956..6f885f9a8 100644
--- a/tests/functional/glusterd/test_volume_delete.py
+++ b/tests/functional/glusterd/test_volume_delete.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -21,10 +21,11 @@ from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.volume_libs import (cleanup_volume, get_volume_list,
setup_volume)
-from glustolibs.gluster.volume_ops import (volume_stop)
+from glustolibs.gluster.volume_ops import (volume_stop, volume_start)
from glustolibs.gluster.brick_libs import get_all_bricks
from glustolibs.gluster.gluster_init import stop_glusterd, start_glusterd
-from glustolibs.gluster.peer_ops import peer_probe_servers, is_peer_connected
+from glustolibs.gluster.peer_ops import (
+ peer_probe_servers, wait_for_peers_to_connect)
@runs_on([['distributed', 'replicated', 'distributed-replicated', 'dispersed',
@@ -44,13 +45,17 @@ class TestVolumeDelete(GlusterBaseClass):
def tearDown(self):
+ # start the volume, it should succeed
+ ret, _, _ = volume_start(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "Volume start failed")
+
# start glusterd on all servers
ret = start_glusterd(self.servers)
if not ret:
raise ExecutionError("Failed to start glusterd on all servers")
for server in self.servers:
- ret = is_peer_connected(server, self.servers)
+ ret = wait_for_peers_to_connect(server, self.servers)
if not ret:
ret = peer_probe_servers(server, self.servers)
if not ret:
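The final hunk swaps the one-shot is_peer_connected check for wait_for_peers_to_connect while keeping the re-probe fallback. As a usage sketch of that recovery step, with the call signatures taken from the hunk and a hypothetical helper name:

    from glustolibs.gluster.exceptions import ExecutionError
    from glustolibs.gluster.peer_ops import (peer_probe_servers,
                                             wait_for_peers_to_connect)


    def ensure_peers_connected(servers):
        # Wait for each server to see the whole pool; re-probe detached
        # servers as a fallback before giving up.
        for server in servers:
            if not wait_for_peers_to_connect(server, servers):
                if not peer_probe_servers(server, servers):
                    raise ExecutionError("Failed to probe servers %s"
                                         % servers)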