From 25064013d6c0889db3d2bf931b0b2d01c72f6a96 Mon Sep 17 00:00:00 2001
From: Sri Vignesh
Date: Fri, 27 Dec 2019 12:16:33 +0530
Subject: Add steps to stabilize the existing content in glusterd

Added the wait_for_io_to_complete function to test cases, and used the
wait_for_glusterd_to_start and wait_for_peers_to_connect functions in
place of hand-rolled polling loops.

Change-Id: I4811848aad8cca4198cc93d8e200dfc47ae7ac9b
Signed-off-by: Sri Vignesh
---
 .../test_brick_status_when_quorum_not_met.py       | 19 ++++------
 .../glusterd/test_change_reserve_limit.py          | 11 +-----
 .../glusterd/test_create_vol_with_used_bricks.py   | 39 +++++++++++---------
 .../glusterd/test_detach_node_used_to_mount.py     | 22 ++++++------
 .../test_glusterd_snap_info_on_detached_node.py    | 28 +++++----------
 .../test_mount_after_removing_client_logs_dir.py   |  9 +++--
 .../glusterd/test_ops_when_one_node_is_down.py     | 21 ++++++-----
 tests/functional/glusterd/test_peer_detach.py      | 22 +++++++-----
 .../test_profile_operations_with_one_node_down.py  | 42 ++++++++--------------
 tests/functional/glusterd/test_rebalance_hang.py   | 30 ++++++++--------
 .../functional/glusterd/test_rebalance_new_node.py |  9 ++++-
 .../functional/glusterd/test_rebalance_spurious.py |  9 +++--
 .../test_remove_brick_after_restart_glusterd.py    | 17 +++++++--
 .../test_restart_glusterd_while_rebalancing.py     | 18 ++++------
 14 files changed, 143 insertions(+), 153 deletions(-)

diff --git a/tests/functional/glusterd/test_brick_status_when_quorum_not_met.py b/tests/functional/glusterd/test_brick_status_when_quorum_not_met.py
index 71209c2e5..d08d8c872 100644
--- a/tests/functional/glusterd/test_brick_status_when_quorum_not_met.py
+++ b/tests/functional/glusterd/test_brick_status_when_quorum_not_met.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2018 Red Hat, Inc.
+# Copyright (C) 2018-2020 Red Hat, Inc.
# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -23,11 +23,12 @@ from glusto.core import Glusto as g from glustolibs.gluster.exceptions import ExecutionError from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on from glustolibs.gluster.volume_ops import set_volume_options -from glustolibs.gluster.gluster_init import (stop_glusterd, start_glusterd, - is_glusterd_running) +from glustolibs.gluster.gluster_init import ( + is_glusterd_running, start_glusterd, stop_glusterd) from glustolibs.gluster.brick_libs import (are_bricks_offline, get_all_bricks) from glustolibs.gluster.volume_ops import get_volume_status +from glustolibs.gluster.peer_ops import wait_for_peers_to_connect @runs_on([['distributed-replicated'], ['glusterfs']]) @@ -56,15 +57,7 @@ class TestBrickStatusWhenQuorumNotMet(GlusterBaseClass): if not ret: raise ExecutionError("Glusterd not started on some of " "the servers") - # checking for peer status from every node - count = 0 - while count < 80: - ret = self.validate_peers_are_connected() - if ret: - break - sleep(2) - count += 1 - + ret = wait_for_peers_to_connect(self.mnode, self.servers) if not ret: raise ExecutionError("Servers are not in peer probed state") @@ -143,7 +136,7 @@ class TestBrickStatusWhenQuorumNotMet(GlusterBaseClass): # immediately after glusterd start, that's why verifying that all # glusterd started nodes available in gluster volume status or not count = 0 - while count < 50: + while count < 80: vol_status = get_volume_status(self.mnode, self.volname) servers_count = len(vol_status[self.volname].keys()) if servers_count == 5: diff --git a/tests/functional/glusterd/test_change_reserve_limit.py b/tests/functional/glusterd/test_change_reserve_limit.py index 39ea05528..9650e5fa1 100644 --- a/tests/functional/glusterd/test_change_reserve_limit.py +++ b/tests/functional/glusterd/test_change_reserve_limit.py @@ -1,4 +1,4 @@ -# Copyright (C) 2019 Red Hat, Inc. +# Copyright (C) 2019-2020 Red Hat, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -52,15 +52,6 @@ class TestChangeReservcelimit(GlusterBaseClass): def tearDown(self): - # Setting storage.reserve to Default - ret = set_volume_options(self.mnode, self.volname, - {'storage.reserve': '0'}) - if not ret: - raise ExecutionError("Failed to reset storage reserve on %s" - % self.mnode) - g.log.info("Able to reset storage reserve successfully on %s", - self.mnode) - # Unmounting the volume. ret, _, _ = umount_volume(mclient=self.mounts[0].client_system, mpoint=self.mounts[0].mountpoint) diff --git a/tests/functional/glusterd/test_create_vol_with_used_bricks.py b/tests/functional/glusterd/test_create_vol_with_used_bricks.py index c255409a0..7c48b920f 100644 --- a/tests/functional/glusterd/test_create_vol_with_used_bricks.py +++ b/tests/functional/glusterd/test_create_vol_with_used_bricks.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018 Red Hat, Inc. +# Copyright (C) 2018-2020 Red Hat, Inc. 
# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -26,6 +26,7 @@ from glustolibs.gluster.exceptions import ExecutionError from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on from glustolibs.gluster.brick_ops import add_brick from glustolibs.gluster.brick_libs import get_all_bricks +from glustolibs.gluster.glusterdir import rmdir from glustolibs.gluster.lib_utils import form_bricks_list from glustolibs.misc.misc_libs import upload_scripts from glustolibs.io.utils import validate_io_procs @@ -108,12 +109,14 @@ class TestCreateVolWithUsedBricks(GlusterBaseClass): g.log.info("Bricks added successfully to the volume %s", self.volname) # Mounting the volume. - ret, _, _ = mount_volume(self.volname, mtype=self.mount_type, - mpoint=self.mounts[0].mountpoint, - mserver=self.mnode, - mclient=self.mounts[0].client_system) - self.assertEqual(ret, 0, ("Volume %s is not mounted") % self.volname) - g.log.info("Volume mounted successfully : %s", self.volname) + for mount_obj in self.mounts: + ret, _, _ = mount_volume(self.volname, mtype=self.mount_type, + mpoint=mount_obj.mountpoint, + mserver=self.mnode, + mclient=mount_obj.client_system) + self.assertEqual(ret, 0, ("Volume %s is not mounted") % ( + self.volname)) + g.log.info("Volume mounted successfully : %s", self.volname) # run IOs g.log.info("Starting IO on all mounts...") @@ -122,10 +125,8 @@ class TestCreateVolWithUsedBricks(GlusterBaseClass): g.log.info("Starting IO on %s:%s", mount_obj.client_system, mount_obj.mountpoint) cmd = ("/usr/bin/env python%d %s create_deep_dirs_with_files " - "--dirname-start-num %d " - "--dir-depth 2 " - "--dir-length 5 " - "--max-num-of-dirs 3 " + "--dirname-start-num %d --dir-depth 2 " + "--dir-length 5 --max-num-of-dirs 3 " "--num-of-files 10 %s" % (sys.version_info.major, self.script_upload_path, self.counter, @@ -139,14 +140,18 @@ class TestCreateVolWithUsedBricks(GlusterBaseClass): # Validate IO self.assertTrue( validate_io_procs(self.all_mounts_procs, self.mounts), - "IO failed on some of the clients" - ) + "IO failed on some of the clients") # Unmouting the volume. - ret, _, _ = umount_volume(mclient=self.mounts[0].client_system, - mpoint=self.mounts[0].mountpoint) - self.assertEqual(ret, 0, ("Volume %s is not unmounted") % self.volname) - g.log.info("Volume unmounted successfully : %s", self.volname) + for mount_obj in self.mounts: + ret, _, _ = umount_volume(mclient=mount_obj.client_system, + mpoint=mount_obj.mountpoint) + self.assertEqual(ret, 0, "Volume %s is not unmounted" % ( + self.volname)) + g.log.info("Volume unmounted successfully : %s", self.volname) + ret = rmdir(mount_obj.client_system, mount_obj.mountpoint) + self.assertTrue(ret, "Failed to remove directory mount directory.") + g.log.info("Mount directory is removed successfully") # Getting brick list self.brick_list = get_all_bricks(self.mnode, self.volname) diff --git a/tests/functional/glusterd/test_detach_node_used_to_mount.py b/tests/functional/glusterd/test_detach_node_used_to_mount.py index b45d7c1de..6b541e26a 100644 --- a/tests/functional/glusterd/test_detach_node_used_to_mount.py +++ b/tests/functional/glusterd/test_detach_node_used_to_mount.py @@ -1,4 +1,4 @@ -# Copyright (C) 2019 Red Hat, Inc. +# Copyright (C) 2019-2020 Red Hat, Inc. 
# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -14,19 +14,18 @@ # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -from time import sleep from random import randint from glusto.core import Glusto as g from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on from glustolibs.gluster.exceptions import ExecutionError from glustolibs.gluster.volume_libs import (setup_volume, cleanup_volume, form_bricks_list_to_add_brick) -from glustolibs.gluster.glusterdir import mkdir +from glustolibs.gluster.glusterdir import mkdir, rmdir from glustolibs.gluster.rebalance_ops import (rebalance_start, rebalance_stop) from glustolibs.gluster.peer_ops import (peer_detach, peer_probe, - is_peer_connected) + wait_for_peers_to_connect) from glustolibs.gluster.brick_ops import add_brick from glustolibs.gluster.mount_ops import mount_volume, umount_volume from glustolibs.gluster.glusterfile import (get_fattr, file_exists, @@ -66,13 +65,10 @@ class TestChangeReservcelimit(GlusterBaseClass): g.log.info("Peer probe successful %s", self.servers[4]) # Wait till peers are in connected state - count = 0 - while count < 60: - ret = is_peer_connected(self.mnode, self.servers) - if ret: - break - sleep(3) - count += 1 + for server in self.servers: + ret = wait_for_peers_to_connect(self.mnode, server) + self.assertTrue(ret, "glusterd is not connected %s with peer %s" + % (self.mnode, server)) # Unmounting and cleaning volume ret, _, _ = umount_volume(mclient=self.mounts[0].client_system, @@ -80,6 +76,10 @@ class TestChangeReservcelimit(GlusterBaseClass): if ret: raise ExecutionError("Unable to unmount volume %s" % self.volname) g.log.info("Volume unmounted successfully %s", self.volname) + ret = rmdir(self.mounts[0].client_system, self.mounts[0].mountpoint) + if not ret: + raise ExecutionError("Failed to remove directory mount directory.") + g.log.info("Mount directory is removed successfully") ret = cleanup_volume(self.mnode, self.volname) if not ret: diff --git a/tests/functional/glusterd/test_glusterd_snap_info_on_detached_node.py b/tests/functional/glusterd/test_glusterd_snap_info_on_detached_node.py index 643129b1c..95aa68739 100644 --- a/tests/functional/glusterd/test_glusterd_snap_info_on_detached_node.py +++ b/tests/functional/glusterd/test_glusterd_snap_info_on_detached_node.py @@ -1,4 +1,4 @@ -# Copyright (C) 2017-2018 Red Hat, Inc. +# Copyright (C) 2017-2020 Red Hat, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -14,7 +14,6 @@ # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
import random -from time import sleep from glusto.core import Glusto as g from glustolibs.gluster.exceptions import ExecutionError from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on @@ -22,7 +21,9 @@ from glustolibs.gluster.lib_utils import form_bricks_list from glustolibs.gluster.volume_ops import (volume_create, set_volume_options, volume_start) from glustolibs.gluster.snap_ops import snap_create, snap_activate -from glustolibs.gluster.peer_ops import peer_detach_servers, peer_probe +from glustolibs.gluster.peer_ops import ( + peer_detach_servers, + peer_probe_servers) @runs_on([['distributed'], ['glusterfs']]) @@ -30,6 +31,10 @@ class TestSnapInfoOnPeerDetachedNode(GlusterBaseClass): def tearDown(self): + ret = peer_probe_servers(self.mnode, self.servers) + if not ret: + raise ExecutionError("Failed to peer probe servers") + # stopping the volume and Cleaning up the volume ret = self.cleanup_volume() if not ret: @@ -124,20 +129,3 @@ class TestSnapInfoOnPeerDetachedNode(GlusterBaseClass): g.log.info("Expected: %s path doesn't exist on peer detached node %s", self.pathname, self.random_node_peer_detach) g.rpyc_close_deployed_servers() - - # Peer probe the detached node - ret, _, _ = peer_probe(self.mnode, self.random_node_peer_detach) - self.assertEqual(ret, 0, "Peer probe of node: %s failed" % - self.random_node_peer_detach) - g.log.info("Peer probe succeeded") - - # Validating peers are in connected state - count = 0 - while count < 10: - sleep(2) - ret = self.validate_peers_are_connected() - if ret: - break - count += 1 - self.assertTrue(ret, "Peers are not in connected state") - g.log.info("Peer are in connected state") diff --git a/tests/functional/glusterd/test_mount_after_removing_client_logs_dir.py b/tests/functional/glusterd/test_mount_after_removing_client_logs_dir.py index c141b9628..5a8fd308f 100644 --- a/tests/functional/glusterd/test_mount_after_removing_client_logs_dir.py +++ b/tests/functional/glusterd/test_mount_after_removing_client_logs_dir.py @@ -1,4 +1,4 @@ -# Copyright (C) 2019 Red Hat, Inc. +# Copyright (C) 2019-2020 Red Hat, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -21,7 +21,7 @@ from glusto.core import Glusto as g from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on from glustolibs.gluster.exceptions import ExecutionError -from glustolibs.gluster.glusterdir import (mkdir, get_dir_contents) +from glustolibs.gluster.glusterdir import (get_dir_contents, mkdir, rmdir) from glustolibs.gluster.mount_ops import mount_volume, umount_volume @@ -68,6 +68,11 @@ class TestRemoveCientLogDirAndMount(GlusterBaseClass): raise ExecutionError("Volume %s is not unmounted" % self.volname) g.log.info("Volume unmounted successfully : %s", self.volname) + ret = rmdir(self.mounts[0].client_system, self.mounts[0].mountpoint) + if not ret: + raise ExecutionError("Failed to remove directory mount directory.") + g.log.info("Mount directory is removed successfully") + # clean up all volumes ret = self.cleanup_volume() if not ret: diff --git a/tests/functional/glusterd/test_ops_when_one_node_is_down.py b/tests/functional/glusterd/test_ops_when_one_node_is_down.py index 5b1ea7809..c304f1dbd 100644 --- a/tests/functional/glusterd/test_ops_when_one_node_is_down.py +++ b/tests/functional/glusterd/test_ops_when_one_node_is_down.py @@ -1,4 +1,4 @@ -# Copyright (C) 2019 Red Hat, Inc. +# Copyright (C) 2019-2020 Red Hat, Inc. 
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -14,13 +14,13 @@
 # with this program; if not, write to the Free Software Foundation, Inc.,
 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 
-from time import sleep
 from random import randint
 
 from glusto.core import Glusto as g
 from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
 from glustolibs.gluster.exceptions import ExecutionError
-from glustolibs.gluster.gluster_init import stop_glusterd, start_glusterd
-from glustolibs.gluster.peer_ops import peer_status, is_peer_connected
+from glustolibs.gluster.gluster_init import (
+    start_glusterd, stop_glusterd, wait_for_glusterd_to_start)
+from glustolibs.gluster.peer_ops import peer_status, wait_for_peers_to_connect
 from glustolibs.gluster.volume_ops import volume_list, volume_info
 from glustolibs.gluster.volume_libs import (cleanup_volume, setup_volume)
 
@@ -45,14 +45,13 @@ class TestOpsWhenOneNodeIsDown(GlusterBaseClass):
             ExecutionError("Failed to start glusterd.")
         g.log.info("Successfully started glusterd.")
 
+        ret = wait_for_glusterd_to_start(self.servers)
+        if not ret:
+            raise ExecutionError("glusterd is not running on %s" % self.servers)
+        g.log.info("Glusterd start on the nodes succeeded")
+
         # Checking if peer is connected.
-        counter = 0
-        while counter < 30:
-            ret = is_peer_connected(self.mnode, self.servers)
-            counter += 1
-            if ret:
-                break
-            sleep(3)
+        ret = wait_for_peers_to_connect(self.mnode, self.servers)
         if not ret:
             ExecutionError("Peer is not in connected state.")
         g.log.info("Peers is in connected state.")
diff --git a/tests/functional/glusterd/test_peer_detach.py b/tests/functional/glusterd/test_peer_detach.py
index 8b6077b60..a3ac83fc9 100644
--- a/tests/functional/glusterd/test_peer_detach.py
+++ b/tests/functional/glusterd/test_peer_detach.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc.
+# Copyright (C) 2017-2020 Red Hat, Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -43,14 +43,20 @@ class PeerDetachVerification(GlusterBaseClass):
             g.log.info("All server peers are already in connected state "
                        "%s:", cls.servers)
 
-    @classmethod
-    def tearDownClass(cls):
+    def tearDown(self):
+
+        ret = peer_probe_servers(self.mnode, self.servers)
+        if not ret:
+            raise ExecutionError("Failed to peer probe servers")
+
         # stopping the volume and Cleaning up the volume
-        ret = cls.cleanup_volume()
-        if ret:
-            g.log.info("Volume deleted successfully : %s", cls.volname)
-        else:
-            raise ExecutionError("Failed Cleanup the Volume %s" % cls.volname)
+        ret = self.cleanup_volume()
+        if not ret:
+            raise ExecutionError("Failed Cleanup the Volume %s" % self.volname)
+        g.log.info("Volume deleted successfully : %s", self.volname)
+
+        # Calling GlusterBaseClass tearDown
+        self.get_super_method(self, 'tearDown')()
 
     def test_peer_detach_host(self):
         # peer Detaching specified server from cluster
diff --git a/tests/functional/glusterd/test_profile_operations_with_one_node_down.py b/tests/functional/glusterd/test_profile_operations_with_one_node_down.py
index 114a13187..b33a4b59b 100644
--- a/tests/functional/glusterd/test_profile_operations_with_one_node_down.py
+++ b/tests/functional/glusterd/test_profile_operations_with_one_node_down.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2019 Red Hat, Inc.
+# Copyright (C) 2019-2020 Red Hat, Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -21,7 +21,6 @@
 
 from random import randint
 import sys
-from time import sleep
 
 from glusto.core import Glusto as g
 
@@ -33,8 +32,8 @@ from glustolibs.misc.misc_libs import upload_scripts
 from glustolibs.io.utils import validate_io_procs
 from glustolibs.gluster.brick_libs import get_online_bricks_list
 from glustolibs.gluster.gluster_init import (stop_glusterd, start_glusterd,
-                                             is_glusterd_running)
-from glustolibs.gluster.peer_ops import is_peer_connected
+                                             wait_for_glusterd_to_start)
+from glustolibs.gluster.peer_ops import wait_for_peers_to_connect
 
 
 @runs_on([['distributed-replicated', 'dispersed', 'distributed-dispersed'],
@@ -77,16 +76,11 @@ class TestProfileOpeartionsWithOneNodeDown(GlusterBaseClass):
         g.log.info("Successfully started glusterd.")
 
         # Checking if peer is connected
-        counter = 0
-        while counter < 30:
-            ret = is_peer_connected(self.mnode, self.servers)
-            counter += 1
-            if ret:
-                break
-            sleep(3)
-        if not ret:
-            ExecutionError("Peers are not in connected state.")
-        g.log.info("Peers are in connected state.")
+        for server in self.servers:
+            ret = wait_for_peers_to_connect(self.mnode, server)
+            if not ret:
+                raise ExecutionError("Peers are not in connected state.")
+        g.log.info("Peers are in connected state.")
 
         # Unmounting and cleaning volume.
         ret = self.unmount_volume_and_cleanup_volume(self.mounts)
@@ -144,13 +138,11 @@ class TestProfileOpeartionsWithOneNodeDown(GlusterBaseClass):
         ret = stop_glusterd(self.servers[self.random_server])
         self.assertTrue(ret, "Failed to stop glusterd on one node.")
         g.log.info("Successfully stopped glusterd on one node.")
-        counter = 0
-        while counter > 20:
-            ret = is_glusterd_running(self.servers[self.random_server])
-            if ret:
-                break
-            counter += 1
-            sleep(3)
+        ret = wait_for_glusterd_to_start(self.servers[self.random_server])
+        self.assertFalse(ret, "glusterd is still running on %s"
+                         % self.servers[self.random_server])
+        g.log.info("Glusterd stop on the nodes : %s "
+                   "succeeded", self.servers[self.random_server])
 
         # Getting and checking output of profile info.
         ret, out, _ = profile_info(self.mnode, self.volname)
@@ -197,13 +189,7 @@ class TestProfileOpeartionsWithOneNodeDown(GlusterBaseClass):
         g.log.info("Successfully started glusterd.")
 
         # Checking if peer is connected
-        counter = 0
-        while counter < 30:
-            ret = is_peer_connected(self.mnode, self.servers)
-            counter += 1
-            if ret:
-                break
-            sleep(3)
+        ret = wait_for_peers_to_connect(self.mnode, self.servers)
         self.assertTrue(ret, "Peers are not in connected state.")
         g.log.info("Peers are in connected state.")
 
diff --git a/tests/functional/glusterd/test_rebalance_hang.py b/tests/functional/glusterd/test_rebalance_hang.py
index 9bbf6d8f4..9c5eb93cf 100644
--- a/tests/functional/glusterd/test_rebalance_hang.py
+++ b/tests/functional/glusterd/test_rebalance_hang.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2018 Red Hat, Inc.
+# Copyright (C) 2018-2020 Red Hat, Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -14,12 +14,12 @@
 # with this program; if not, write to the Free Software Foundation, Inc.,
 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 
-from time import sleep from glusto.core import Glusto as g from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on from glustolibs.gluster.volume_ops import (volume_create, volume_start, get_volume_list, get_volume_status) from glustolibs.gluster.brick_libs import get_all_bricks +from glustolibs.gluster.glusterdir import rmdir from glustolibs.gluster.volume_libs import (cleanup_volume) from glustolibs.gluster.peer_ops import (peer_probe, peer_detach, peer_probe_servers, @@ -32,7 +32,8 @@ from glustolibs.gluster.rebalance_ops import (rebalance_start, from glustolibs.gluster.mount_ops import mount_volume, umount_volume from glustolibs.io.utils import validate_io_procs from glustolibs.gluster.gluster_init import (start_glusterd, stop_glusterd, - is_glusterd_running) + is_glusterd_running, + wait_for_glusterd_to_start) @runs_on([['distributed'], ['glusterfs']]) @@ -57,6 +58,11 @@ class TestRebalanceHang(GlusterBaseClass): self.assertTrue(ret, ("Failed to Unmount Volume %s" % self.volname)) g.log.info("Successfully Unmounted Volume %s", self.volname) + ret = rmdir(self.mounts[0].client_system, self.mounts[0].mountpoint) + if not ret: + raise ExecutionError("Failed to remove directory mount directory.") + g.log.info("Mount directory is removed successfully") + # Clean up all volumes and peer probe to form cluster vol_list = get_volume_list(self.mnode) if vol_list is not None: @@ -141,16 +147,14 @@ class TestRebalanceHang(GlusterBaseClass): "do mkdir l1_dir.$i/l2_dir.$j ; " "for k in `seq 1 10` ; " "do dd if=/dev/urandom of=l1_dir.$i/l2_dir.$j/test.$k " - "bs=128k count=$k ; " - "done ; " - "done ; " - "done ; " + "bs=128k count=$k ; done ; done ; done ; " % (self.mounts[0].mountpoint)) proc = g.run_async(self.mounts[0].client_system, command, user=self.mounts[0].user) self.all_mounts_procs.append(proc) self.io_validation_complete = False + # Validate IO ret = validate_io_procs(self.all_mounts_procs, self.mounts) self.io_validation_complete = True @@ -186,14 +190,8 @@ class TestRebalanceHang(GlusterBaseClass): # Start glusterd on the node where it is stopped ret = start_glusterd(self.servers[1]) self.assertTrue(ret, "glusterd start on the node failed") - count = 0 - while count < 60: - ret = is_glusterd_running(self.servers[1]) - if not ret: - break - sleep(2) - count += 1 - self.assertEqual(ret, 0, "glusterd is not running on %s" - % self.servers[1]) + ret = wait_for_glusterd_to_start(self.servers[1]) + self.assertTrue(ret, "glusterd is not running on %s" + % self.servers[1]) g.log.info("Glusterd start on the nodes : %s " "succeeded", self.servers[1]) diff --git a/tests/functional/glusterd/test_rebalance_new_node.py b/tests/functional/glusterd/test_rebalance_new_node.py index 98d2acc20..71ce4565b 100644 --- a/tests/functional/glusterd/test_rebalance_new_node.py +++ b/tests/functional/glusterd/test_rebalance_new_node.py @@ -1,4 +1,4 @@ -# Copyright (C) 2016-2017 Red Hat, Inc. +# Copyright (C) 2016-2020 Red Hat, Inc. 
# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -19,6 +19,7 @@ import sys from glusto.core import Glusto as g from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on +from glustolibs.gluster.glusterdir import rmdir from glustolibs.gluster.exceptions import ExecutionError from glustolibs.gluster.volume_libs import (setup_volume, cleanup_volume) from glustolibs.gluster.volume_ops import (get_volume_list) @@ -67,6 +68,12 @@ class TestRebalanceStatus(GlusterBaseClass): # unmount the volume ret = self.unmount_volume(self.mounts) self.assertTrue(ret, "Volume unmount failed for %s" % self.volname) + for mount_obj in self.mounts: + ret = rmdir(mount_obj.client_system, mount_obj.mountpoint) + if not ret: + raise ExecutionError("Failed to remove directory " + "mount directory.") + g.log.info("Mount directory is removed successfully") # get volumes list and clean up all the volumes vol_list = get_volume_list(self.mnode) diff --git a/tests/functional/glusterd/test_rebalance_spurious.py b/tests/functional/glusterd/test_rebalance_spurious.py index 7b8fc513b..d0e792bd6 100644 --- a/tests/functional/glusterd/test_rebalance_spurious.py +++ b/tests/functional/glusterd/test_rebalance_spurious.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018 Red Hat, Inc. +# Copyright (C) 2018-2020 Red Hat, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -30,7 +30,7 @@ from glustolibs.gluster.brick_ops import remove_brick from glustolibs.gluster.exceptions import ExecutionError from glustolibs.gluster.rebalance_ops import (rebalance_start, wait_for_fix_layout_to_complete) -from glustolibs.gluster.glusterdir import mkdir +from glustolibs.gluster.glusterdir import mkdir, rmdir from glustolibs.gluster.mount_ops import mount_volume, umount_volume from glustolibs.gluster.glusterfile import get_fattr @@ -57,6 +57,11 @@ class TestSpuriousRebalance(GlusterBaseClass): self.assertTrue(ret, ("Failed to Unmount Volume %s" % self.volname)) g.log.info("Successfully Unmounted Volume %s", self.volname) + ret = rmdir(self.mounts[0].client_system, self.mounts[0].mountpoint) + if not ret: + raise ExecutionError("Failed to remove directory mount directory.") + g.log.info("Mount directory is removed successfully") + # Clean up all volumes and peer probe to form cluster vol_list = get_volume_list(self.mnode) if vol_list is not None: diff --git a/tests/functional/glusterd/test_remove_brick_after_restart_glusterd.py b/tests/functional/glusterd/test_remove_brick_after_restart_glusterd.py index a538ba073..4dd2d3d13 100644 --- a/tests/functional/glusterd/test_remove_brick_after_restart_glusterd.py +++ b/tests/functional/glusterd/test_remove_brick_after_restart_glusterd.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018 Red Hat, Inc. +# Copyright (C) 2018-2020 Red Hat, Inc. 
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -29,7 +29,10 @@
 from glustolibs.gluster.lib_utils import form_bricks_list
 from glustolibs.gluster.brick_ops import remove_brick
 from glustolibs.gluster.exceptions import ExecutionError
 from glustolibs.gluster.mount_ops import mount_volume, umount_volume
-from glustolibs.io.utils import validate_io_procs
+from glustolibs.io.utils import (
+    wait_for_io_to_complete,
+    validate_io_procs)
+from glustolibs.gluster.glusterdir import rmdir
 from glustolibs.gluster.gluster_init import restart_glusterd
 
@@ -54,6 +57,10 @@ class TestRemoveBrickAfterRestartGlusterd(GlusterBaseClass):
                                    self.mounts[0].mountpoint, mtype=self.mount_type)
         self.assertTrue(ret, ("Failed to Unmount Volume %s" % self.volname))
         g.log.info("Successfully Unmounted Volume %s", self.volname)
+        ret = rmdir(self.mounts[0].client_system, self.mounts[0].mountpoint)
+        if not ret:
+            raise ExecutionError("Failed to remove directory mount directory.")
+        g.log.info("Mount directory is removed successfully")
 
         # Clean up all volumes and peer probe to form cluster
         vol_list = get_volume_list(self.mnode)
@@ -165,6 +172,12 @@ class TestRemoveBrickAfterRestartGlusterd(GlusterBaseClass):
                                    user=self.mounts[0].user)
             self.all_mounts_procs.append(proc)
         self.io_validation_complete = False
+
+        # wait for io to complete
+        self.assertTrue(
+            wait_for_io_to_complete(self.all_mounts_procs, self.mounts),
+            "IO failed to complete on some of the clients")
+
         # Validate IO
         ret = validate_io_procs(self.all_mounts_procs, self.mounts)
         self.io_validation_complete = True
diff --git a/tests/functional/glusterd/test_restart_glusterd_while_rebalancing.py b/tests/functional/glusterd/test_restart_glusterd_while_rebalancing.py
index c0a32960d..6b6256872 100644
--- a/tests/functional/glusterd/test_restart_glusterd_while_rebalancing.py
+++ b/tests/functional/glusterd/test_restart_glusterd_while_rebalancing.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2018 Red Hat, Inc.
+# Copyright (C) 2018-2020 Red Hat, Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -19,8 +19,8 @@
 """
 
 import sys
-from time import sleep
 
+from time import sleep
 from glusto.core import Glusto as g
 
 from glustolibs.gluster.exceptions import ExecutionError
@@ -30,7 +30,7 @@ from glustolibs.gluster.brick_ops import add_brick
 from glustolibs.gluster.rebalance_ops import (rebalance_start,
                                               get_rebalance_status)
 from glustolibs.gluster.gluster_init import (restart_glusterd,
-                                             is_glusterd_running)
+                                             wait_for_glusterd_to_start)
 from glustolibs.io.utils import validate_io_procs
 from glustolibs.misc.misc_libs import upload_scripts
 from glustolibs.gluster.glusterdir import get_dir_contents
@@ -178,13 +178,7 @@ class TestRestartGlusterdWhileRebalance(GlusterBaseClass):
         g.log.info("Glusterd restarted successfully on %s", self.servers)
 
         # Checking glusterd status
-        count = 0
-        while count < 60:
-            ret = is_glusterd_running(self.servers)
-            if not ret:
-                break
-            sleep(2)
-            count += 1
-        self.assertEqual(ret, 0, "Glusterd is not running on some of the "
-                                 "servers")
+        ret = wait_for_glusterd_to_start(self.servers)
+        self.assertTrue(ret, "Glusterd is not running on some of the "
+                             "servers")
         g.log.info("Glusterd is running on all servers %s", self.servers)
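
The recurring refactor in this patch swaps open-coded `count`/`sleep` polling loops for the library's wait helpers (`wait_for_glusterd_to_start`, `wait_for_peers_to_connect`, `wait_for_io_to_complete`). A minimal sketch of the pattern those helpers encapsulate is below; the `poll_until` name, the timeout defaults, and the check-function plumbing are illustrative assumptions, not the actual glustolibs implementation:

```python
# Hypothetical sketch of the timeout-bounded polling that the glustolibs
# wait_for_* helpers provide; names and defaults here are assumptions.
from time import sleep, time


def poll_until(check_fn, timeout=80, interval=2):
    """Call check_fn() every `interval` seconds until it returns a
    truthy value or `timeout` seconds elapse; return the last result."""
    deadline = time() + timeout
    result = check_fn()
    while not result and time() < deadline:
        sleep(interval)
        result = check_fn()
    return result


# Usage mirroring the loops this patch removes, e.g. in setUp/tearDown:
# ret = poll_until(lambda: is_peer_connected(mnode, servers), timeout=60)
# if not ret:
#     raise ExecutionError("Servers are not in peer probed state")
```

Centralizing the loop in one helper gives every test the same timeout semantics and removes the repeated `counter`/`sleep` boilerplate that several of these files got subtly wrong (for example, the `while counter > 20` loop deleted above, whose body never executed).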