From 16a3b1f8dd1492fe45861cbd9f3a7d10f988c843 Mon Sep 17 00:00:00 2001
From: Sri Vignesh
Date: Thu, 20 Feb 2020 17:09:10 +0530
Subject: [testfix] Add steps to add peer_probe_servers in cleanup

Change-Id: I0fa6bbacda16fb97d3454a8510a937442b5755a4
Signed-off-by: Sri Vignesh
---
 .../glusterd/test_add_brick_when_quorum_not_met.py |  9 +------
 .../test_brick_status_when_quorum_not_met.py       | 28 +++++++++++-----------
 .../test_glusterd_logs_when_peer_detach.py         | 16 ++++++++-----
 .../glusterd/test_glusterd_split_brain.py          | 26 ++++++++++++++++----
 .../test_profile_start_with_quorum_not_met.py      | 10 --------
 .../glusterd/test_quorum_remove_brick.py           | 28 +++++++---------------
 .../test_remove_brick_when_quorum_not_met.py       |  7 ------
 .../glusterd/test_replace_brick_quorum_not_met.py  |  9 -------
 .../test_volume_set_with_quorum_enabled.py         |  9 -------
 .../test_volume_status_with_absent_bricks.py       |  7 +++---
 10 files changed, 60 insertions(+), 89 deletions(-)

diff --git a/tests/functional/glusterd/test_add_brick_when_quorum_not_met.py b/tests/functional/glusterd/test_add_brick_when_quorum_not_met.py
index 0961decf6..7d8aad0e0 100644
--- a/tests/functional/glusterd/test_add_brick_when_quorum_not_met.py
+++ b/tests/functional/glusterd/test_add_brick_when_quorum_not_met.py
@@ -20,8 +20,7 @@ from glustolibs.gluster.gluster_base_class import runs_on, GlusterBaseClass
 from glustolibs.gluster.exceptions import ExecutionError
 from glustolibs.gluster.volume_libs import setup_volume
 from glustolibs.gluster.volume_ops import (set_volume_options,
-                                           get_volume_status,
-                                           volume_reset)
+                                           get_volume_status)
 from glustolibs.gluster.gluster_init import (stop_glusterd, start_glusterd,
                                              is_glusterd_running)
 from glustolibs.gluster.brick_libs import get_all_bricks, are_bricks_offline
@@ -56,12 +55,6 @@ class TestAddBrickWhenQuorumNotMet(GlusterBaseClass):
 
         g.log.info("Peers are in connected state")
 
-        # reset quorum ratio to default
-        g.log.info("resetting quorum ratio")
-        ret, _, _ = volume_reset(self.mnode, 'all')
-        self.assertEqual(ret, 0, "Failed to reset quorum ratio")
-        g.log.info("Successfully resetted quorum ratio")
-
         # stopping the volume and Cleaning up the volume
         ret = self.cleanup_volume()
         if not ret:
diff --git a/tests/functional/glusterd/test_brick_status_when_quorum_not_met.py b/tests/functional/glusterd/test_brick_status_when_quorum_not_met.py
index 9ea9b1ee4..992972246 100644
--- a/tests/functional/glusterd/test_brick_status_when_quorum_not_met.py
+++ b/tests/functional/glusterd/test_brick_status_when_quorum_not_met.py
@@ -28,7 +28,7 @@ from glustolibs.gluster.gluster_init import (
 from glustolibs.gluster.brick_libs import (are_bricks_offline,
                                            get_all_bricks)
 from glustolibs.gluster.volume_ops import get_volume_status
-from glustolibs.gluster.peer_ops import wait_for_peers_to_connect
+from glustolibs.gluster.peer_ops import peer_probe_servers, is_peer_connected
 
 
 @runs_on([['distributed-replicated'], ['glusterfs']])
@@ -55,19 +55,19 @@ class TestBrickStatusWhenQuorumNotMet(GlusterBaseClass):
         if ret:
             ret = start_glusterd(self.servers)
             if not ret:
-                raise ExecutionError("Glusterd not started on some of "
-                                     "the servers")
-        ret = wait_for_peers_to_connect(self.mnode, self.servers)
+                raise ExecutionError("Failed to start glusterd on %s"
+                                     % self.servers)
+        # Allow about 5 seconds for glusterd to restart and peers to reconnect
+        sleep(5)
+        g.log.info("Glusterd started successfully on %s", self.servers)
+
+        # checking for peer status from every node
+        ret = is_peer_connected(self.mnode, self.servers)
         if not ret:
-            raise ExecutionError("Servers are not in peer probed state")
-
-        # Setting quorum ratio to 51%
-        ret = set_volume_options(self.mnode, 'all',
-                                 {'cluster.server-quorum-ratio': '51%'})
-        self.assertTrue(ret, "Failed to set quorum ratio to 51 percentage on "
-                        "servers %s" % self.servers)
-        g.log.info("Able to set server quorum ratio to 51 percentage "
-                   "on servers %s", self.servers)
+            ret = peer_probe_servers(self.mnode, self.servers)
+            if not ret:
+                raise ExecutionError("Failed to peer probe "
+                                     "servers %s" % self.servers)
 
         # stopping the volume and Cleaning up the volume
         ret = self.cleanup_volume()
@@ -136,7 +136,7 @@ class TestBrickStatusWhenQuorumNotMet(GlusterBaseClass):
         # immediately after glusterd start, that's why verifying that all
         # glusterd started nodes available in gluster volume status or not
         count = 0
-        while count < 120:
+        while count < 200:
             vol_status = get_volume_status(self.mnode, self.volname)
             servers_count = len(vol_status[self.volname].keys())
             if servers_count == 5:
diff --git a/tests/functional/glusterd/test_glusterd_logs_when_peer_detach.py b/tests/functional/glusterd/test_glusterd_logs_when_peer_detach.py
index 1275ad73a..2ffa50392 100644
--- a/tests/functional/glusterd/test_glusterd_logs_when_peer_detach.py
+++ b/tests/functional/glusterd/test_glusterd_logs_when_peer_detach.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2018 Red Hat, Inc.
+# Copyright (C) 2018-2020 Red Hat, Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -24,7 +24,8 @@ from glusto.core import Glusto as g
 from glustolibs.gluster.exceptions import ExecutionError
 from glustolibs.gluster.gluster_base_class import GlusterBaseClass
 from glustolibs.gluster.peer_ops import (peer_detach_servers,
-                                         peer_probe_servers)
+                                         peer_probe_servers,
+                                         is_peer_connected)
 
 
 class GlusterdLogsWhilePeerDetach(GlusterBaseClass):
@@ -33,11 +34,14 @@ class GlusterdLogsWhilePeerDetach(GlusterBaseClass):
         """
         tearDown for every test
         """
-        # Peer probe detached server
-        ret = peer_probe_servers(self.mnode, self.random_server)
+        # checking for peer status from every node
+        ret = is_peer_connected(self.mnode, self.servers)
         if not ret:
-            raise ExecutionError(ret, "Failed to probe detached server")
-        g.log.info("peer probe is successful for %s", self.random_server)
+            ret = peer_probe_servers(self.mnode, self.random_server)
+            if not ret:
+                raise ExecutionError("Failed to peer probe "
+                                     "server %s" % self.random_server)
+        g.log.info("All peers are in connected state")
 
         # Calling GlusterBaseClass tearDown
         self.get_super_method(self, 'tearDown')()
diff --git a/tests/functional/glusterd/test_glusterd_split_brain.py b/tests/functional/glusterd/test_glusterd_split_brain.py
index ca60398bb..c8954da26 100644
--- a/tests/functional/glusterd/test_glusterd_split_brain.py
+++ b/tests/functional/glusterd/test_glusterd_split_brain.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc.
+# Copyright (C) 2017-2020 Red Hat, Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -14,11 +14,11 @@
 # with this program; if not, write to the Free Software Foundation, Inc.,
 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 
-import time
+from time import sleep
 from glusto.core import Glusto as g
 from glustolibs.gluster.exceptions import ExecutionError
 from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
-from glustolibs.gluster.peer_ops import is_peer_connected
+from glustolibs.gluster.peer_ops import is_peer_connected, peer_probe_servers
 from glustolibs.gluster.volume_libs import (cleanup_volume,
                                             setup_volume)
 from glustolibs.gluster.volume_ops import (get_volume_list,
@@ -59,6 +59,24 @@ class GlusterdSplitBrainQuorumValidation(GlusterBaseClass):
     def tearDown(self):
         # stopping the volume and Cleaning up the volume
         self.get_super_method(self, 'tearDown')()
+        ret = is_glusterd_running(self.servers)
+        if ret:
+            ret = start_glusterd(self.servers)
+            if not ret:
+                raise ExecutionError("Failed to start glusterd on %s"
+                                     % self.servers)
+        # Allow about 5 seconds for glusterd to restart and peers to reconnect
+        sleep(5)
+        g.log.info("Glusterd started successfully on %s", self.servers)
+
+        # checking for peer status from every node
+        ret = is_peer_connected(self.mnode, self.servers)
+        if not ret:
+            ret = peer_probe_servers(self.mnode, self.servers)
+            if not ret:
+                raise ExecutionError("Failed to peer probe "
+                                     "servers %s" % self.servers)
+        g.log.info("All peers are in connected state")
         vol_list = get_volume_list(self.mnode)
         if vol_list is None:
             raise ExecutionError("Failed to get the volume list")
@@ -138,7 +156,7 @@ class GlusterdSplitBrainQuorumValidation(GlusterBaseClass):
             if not ret:
                 g.log.info("Peers are not connected state,"
                            " Retry after 2 seconds .......")
-                time.sleep(2)
+                sleep(2)
                 counter = counter + 2
             else:
                 _rc = True
diff --git a/tests/functional/glusterd/test_profile_start_with_quorum_not_met.py b/tests/functional/glusterd/test_profile_start_with_quorum_not_met.py
index 7bb434fe3..2dd2e2f26 100644
--- a/tests/functional/glusterd/test_profile_start_with_quorum_not_met.py
+++ b/tests/functional/glusterd/test_profile_start_with_quorum_not_met.py
@@ -42,16 +42,6 @@ class TestProfileStartWithQuorumNotMet(GlusterBaseClass):
 
     def tearDown(self):
 
-        # Setting Quorum ratio to 51%
-        self.quorum_perecent = {'cluster.server-quorum-ratio': '51%'}
-        ret = set_volume_options(self.mnode, 'all', self.quorum_perecent)
-        if not ret:
-            raise ExecutionError("gluster volume set all cluster."
-                                 "server-quorum-ratio percentage "
-                                 "Failed :%s" % self.servers)
-        g.log.info("gluster volume set all cluster.server-quorum-ratio 51 "
-                   "percentage enabled successfully on :%s", self.servers)
-
         # stopping the volume and Cleaning up the volume
         ret = self.cleanup_volume()
         if not ret:
diff --git a/tests/functional/glusterd/test_quorum_remove_brick.py b/tests/functional/glusterd/test_quorum_remove_brick.py
index 4e57c90f1..b936e4c48 100644
--- a/tests/functional/glusterd/test_quorum_remove_brick.py
+++ b/tests/functional/glusterd/test_quorum_remove_brick.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2018 Red Hat, Inc.
+# Copyright (C) 2018-2020 Red Hat, Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -26,6 +26,7 @@ from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
 from glustolibs.gluster.volume_ops import set_volume_options
 from glustolibs.gluster.gluster_init import (stop_glusterd, start_glusterd,
                                              is_glusterd_running)
+from glustolibs.gluster.peer_ops import peer_probe_servers, is_peer_connected
 from glustolibs.gluster.brick_libs import get_all_bricks
 from glustolibs.gluster.brick_ops import remove_brick
 from glustolibs.gluster.volume_libs import form_bricks_list_to_remove_brick
@@ -56,30 +57,19 @@ class TestServerQuorumNotMet(GlusterBaseClass):
             if not ret:
                 raise ExecutionError("Failed to start glusterd on %s"
                                      % self.random_server)
+        # Allow about 5 seconds for glusterd to restart and peers to reconnect
+        sleep(5)
         g.log.info("Glusterd started successfully on %s", self.random_server)
 
         # checking for peer status from every node
-        count = 0
-        while count < 80:
-            ret = self.validate_peers_are_connected()
-            if ret:
-                break
-            sleep(2)
-            count += 1
-
+        ret = is_peer_connected(self.mnode, self.servers)
         if not ret:
-            raise ExecutionError("Servers are not in peer probed state")
+            ret = peer_probe_servers(self.mnode, self.random_server)
+            if not ret:
+                raise ExecutionError("Failed to peer probe "
+                                     "server %s" % self.random_server)
         g.log.info("All peers are in connected state")
 
-        # Setting server-quorum-ratio to 51%
-        ret = set_volume_options(self.mnode, 'all',
-                                 {'cluster.server-quorum-ratio': '51%'})
-        if not ret:
-            raise ExecutionError("Failed to set server quorum ratio for %s"
-                                 % self.servers)
-        g.log.info("Able to set server quorum ratio successfully for %s",
-                   self.servers)
-
         # stopping the volume and Cleaning up the volume
         ret = self.cleanup_volume()
         if not ret:
diff --git a/tests/functional/glusterd/test_remove_brick_when_quorum_not_met.py b/tests/functional/glusterd/test_remove_brick_when_quorum_not_met.py
index 5b1dc7216..b83ec78f5 100644
--- a/tests/functional/glusterd/test_remove_brick_when_quorum_not_met.py
+++ b/tests/functional/glusterd/test_remove_brick_when_quorum_not_met.py
@@ -57,13 +57,6 @@ class TestRemoveBrickWhenQuorumNotMet(GlusterBaseClass):
         if not ret:
             raise ExecutionError("Servers are not in connected state")
 
-        # Setting quorum ratio to 51%
-        ret = set_volume_options(self.mnode, 'all',
-                                 {'cluster.server-quorum-ratio': '51%'})
-        if not ret:
-            raise ExecutionError("Failed to set server quorum ratio on %s"
-                                 % self.volname)
-
         # stopping the volume and Cleaning up the volume
         ret = self.cleanup_volume()
         if not ret:
diff --git a/tests/functional/glusterd/test_replace_brick_quorum_not_met.py b/tests/functional/glusterd/test_replace_brick_quorum_not_met.py
index f89d963a5..00e1af9cc 100644
--- a/tests/functional/glusterd/test_replace_brick_quorum_not_met.py
+++ b/tests/functional/glusterd/test_replace_brick_quorum_not_met.py
@@ -60,15 +60,6 @@ class TestReplaceBrickWhenQuorumNotMet(GlusterBaseClass):
         if not ret:
             raise ExecutionError("Servers are not in peer probed state")
 
-        # Setting Quorum ratio to 51%
-        ret = set_volume_options(self.mnode, 'all',
-                                 {'cluster.server-quorum-ratio': '51%'})
-        if not ret:
-            raise ExecutionError("Failed to set server quorum ratio on %s"
-                                 % self.servers)
-        g.log.info("Able to set server quorum ratio successfully on %s",
-                   self.servers)
-
         # stopping the volume and Cleaning up the volume
         ret = self.cleanup_volume()
         if not ret:
diff --git a/tests/functional/glusterd/test_volume_set_with_quorum_enabled.py b/tests/functional/glusterd/test_volume_set_with_quorum_enabled.py
index 23598f007..41719da63 100644
--- a/tests/functional/glusterd/test_volume_set_with_quorum_enabled.py
+++ b/tests/functional/glusterd/test_volume_set_with_quorum_enabled.py
@@ -56,15 +56,6 @@ class TestVolumeSetOpWithQuorum(GlusterBaseClass):
                                  % (self.mnode, self.servers))
         g.log.info("Peers is in connected state.")
 
-        # Setting Quorum ratio to 51%
-        self.quorum_perecent = {'cluster.server-quorum-ratio': '51%'}
-        ret = set_volume_options(self.mnode, 'all', self.quorum_perecent)
-        if not ret:
-            raise ExecutionError("gluster volume set all cluster.server-quorum"
-                                 "-ratio percentage Failed :%s" % self.servers)
-        g.log.info("gluster volume set all cluster.server-quorum-ratio 51 "
-                   "percentage enabled successfully on :%s", self.servers)
-
         # stopping the volume and Cleaning up the volume
         ret = self.cleanup_volume()
         if not ret:
diff --git a/tests/functional/glusterd/test_volume_status_with_absent_bricks.py b/tests/functional/glusterd/test_volume_status_with_absent_bricks.py
index 5a3d60485..f47c6b61c 100644
--- a/tests/functional/glusterd/test_volume_status_with_absent_bricks.py
+++ b/tests/functional/glusterd/test_volume_status_with_absent_bricks.py
@@ -24,6 +24,7 @@ from glustolibs.gluster.exceptions import ExecutionError
 from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
 from glustolibs.gluster.volume_ops import (volume_create, volume_start,
                                            volume_status)
+from glustolibs.gluster.volume_libs import cleanup_volume
 from glustolibs.gluster.lib_utils import form_bricks_list
 
 
@@ -36,10 +37,10 @@ class TestVolumeStatusWithAbsentBricks(GlusterBaseClass):
         tearDown for every test
         """
         # stopping the volume and Cleaning up the volume
-        ret = self.cleanup_volume()
+        ret = cleanup_volume(self.mnode, self.volname)
         if not ret:
-            raise ExecutionError("Failed Cleanup the Volume %s"
-                                 % self.volname)
+            raise ExecutionError("Failed to cleanup volume %s" % self.volname)
+        g.log.info("Volume deleted successfully: %s", self.volname)
 
         # Calling GlusterBaseClass tearDown
         self.get_super_method(self, 'tearDown')()
--
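Note on the pattern above: every tearDown in this patch repeats the same sequence, namely restart glusterd where it is down, sleep a fixed five seconds, then re-probe any peers that are not connected. Below is a minimal sketch of how that sequence could be factored into one shared helper that polls instead of sleeping. The helper name ensure_peers_connected and its retry parameters are illustrative, not part of glustolibs; the imports are the same APIs the diff already uses.

from time import sleep

from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_init import (is_glusterd_running,
                                             start_glusterd)
from glustolibs.gluster.peer_ops import is_peer_connected, peer_probe_servers


def ensure_peers_connected(mnode, servers, retries=10, interval=2):
    """Bring glusterd up everywhere and get all peers connected.

    Illustrative helper (not part of glustolibs) consolidating the
    cleanup steps this patch repeats in each test's tearDown.
    """
    # is_glusterd_running returns non-zero when glusterd is down on any node
    if is_glusterd_running(servers):
        if not start_glusterd(servers):
            raise ExecutionError("Failed to start glusterd on %s" % servers)

    # Poll for the peers to reconnect instead of a fixed sleep(5)
    for _ in range(retries):
        if is_peer_connected(mnode, servers):
            g.log.info("All peers are in connected state")
            return
        sleep(interval)

    # Still disconnected: probe the servers again, then verify once more
    if not peer_probe_servers(mnode, servers):
        raise ExecutionError("Failed to peer probe servers %s" % servers)
    if not is_peer_connected(mnode, servers):
        raise ExecutionError("Peers did not reach connected state")

Each tearDown would then reduce to ensure_peers_connected(self.mnode, self.servers) followed by the volume cleanup. Polling also avoids a fixed sleep(5) failing spuriously on slow nodes, which is presumably why the pre-patch code used wait_for_peers_to_connect().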