Diffstat (limited to 'tests/functional/glusterd')
-rw-r--r--  tests/functional/glusterd/test_add_brick.py | 4
-rw-r--r--  tests/functional/glusterd/test_add_brick_when_quorum_not_met.py | 23
-rw-r--r--  tests/functional/glusterd/test_add_identical_brick_new_node.py | 14
-rw-r--r--  tests/functional/glusterd/test_brick_log_messages.py | 16
-rw-r--r--  tests/functional/glusterd/test_brick_port_after_stop_glusterd_modify_volume.py | 51
-rw-r--r--  tests/functional/glusterd/test_brick_status_when_quorum_not_met.py | 48
-rw-r--r--  tests/functional/glusterd/test_bricks_online_after_node_reboot.py | 178
-rw-r--r--  tests/functional/glusterd/test_change_reserve_limit.py | 29
-rw-r--r--  tests/functional/glusterd/test_change_reserve_limit_to_wrong_values.py | 80
-rw-r--r--  tests/functional/glusterd/test_concurrent_set.py | 4
-rw-r--r--  tests/functional/glusterd/test_create_vol_with_used_bricks.py | 46
-rw-r--r--  tests/functional/glusterd/test_default_log_level_of_cli.py | 97
-rw-r--r--  tests/functional/glusterd/test_default_max_bricks_per_process.py | 100
-rw-r--r--  tests/functional/glusterd/test_default_ping_timer_and_epoll_thread_count.py | 87
-rw-r--r--  tests/functional/glusterd/test_detach_node_used_to_mount.py | 19
-rw-r--r--  tests/functional/glusterd/test_enable_brickmux_create_and_stop_three_volumes.py | 2
-rw-r--r--  tests/functional/glusterd/test_enable_storage_reserve_volume.py | 79
-rw-r--r--  tests/functional/glusterd/test_enabling_glusterd_debug_mode.py | 38
-rw-r--r--  tests/functional/glusterd/test_get_state_on_brick_unmount.py | 126
-rw-r--r--  tests/functional/glusterd/test_getstate_shows_correct_brick_status_when_brick_killed.py | 124
-rw-r--r--  tests/functional/glusterd/test_gluster_detect_drop_of_outbound_traffic.py | 115
-rw-r--r--  tests/functional/glusterd/test_gluster_does_not_do_posix_lock_when_multiple_client.py | 91
-rw-r--r--  tests/functional/glusterd/test_gluster_volume_status_xml_dump.py | 106
-rw-r--r--  tests/functional/glusterd/test_glusterd_default_volume_behavior_quorum_options.py | 144
-rw-r--r--  tests/functional/glusterd/test_glusterd_gluster_process_stop_start_cycle.py | 123
-rw-r--r--  tests/functional/glusterd/test_glusterd_logs_when_peer_detach.py | 18
-rw-r--r--  tests/functional/glusterd/test_glusterd_memory_consumption_increase.py | 207
-rw-r--r--  tests/functional/glusterd/test_glusterd_quorum.py | 300
-rw-r--r--  tests/functional/glusterd/test_glusterd_quorum_command.py | 104
-rw-r--r--  tests/functional/glusterd/test_glusterd_replace_brick.py | 4
-rw-r--r--  tests/functional/glusterd/test_glusterd_restart_quorum_not_met.py | 125
-rw-r--r--  tests/functional/glusterd/test_glusterd_selinux.py | 75
-rw-r--r--  tests/functional/glusterd/test_glusterd_set_reset_reserve_limit.py | 95
-rw-r--r--  tests/functional/glusterd/test_glusterd_snap_info_on_detached_node.py | 40
-rw-r--r--  tests/functional/glusterd/test_glusterd_split_brain.py | 30
-rw-r--r--  tests/functional/glusterd/test_host_uuid_in_volume_info_xml.py | 4
-rw-r--r--  tests/functional/glusterd/test_lower_gluster_op_version.py | 10
-rw-r--r--  tests/functional/glusterd/test_mount_after_removing_client_logs_dir.py | 8
-rw-r--r--  tests/functional/glusterd/test_mountpoint_ownership_post_volume_restart.py | 109
-rw-r--r--  tests/functional/glusterd/test_nfs_quorum.py | 6
-rw-r--r--  tests/functional/glusterd/test_op_version.py | 9
-rw-r--r--  tests/functional/glusterd/test_ops_when_one_node_is_down.py | 25
-rw-r--r--  tests/functional/glusterd/test_peer_detach.py | 127
-rw-r--r--  tests/functional/glusterd/test_peer_detach_check_warning_message.py | 2
-rw-r--r--  tests/functional/glusterd/test_peer_probe.py | 4
-rw-r--r--  tests/functional/glusterd/test_peer_probe_after_setting_global_options.py | 4
-rw-r--r--  tests/functional/glusterd/test_peer_probe_firewall_ports_not_opened.py | 140
-rw-r--r--  tests/functional/glusterd/test_peer_probe_while_snapd_running.py | 2
-rw-r--r--  tests/functional/glusterd/test_peer_status.py | 5
-rw-r--r--  tests/functional/glusterd/test_probe_glusterd.py | 18
-rw-r--r--  tests/functional/glusterd/test_probe_glusterd_down.py | 140
-rw-r--r--  tests/functional/glusterd/test_probe_hostname.py | 4
-rw-r--r--  tests/functional/glusterd/test_profile_info_without_having_profile_started.py | 188
-rw-r--r--  tests/functional/glusterd/test_profile_operations.py | 22
-rw-r--r--  tests/functional/glusterd/test_profile_operations_with_one_node_down.py | 62
-rw-r--r--  tests/functional/glusterd/test_profile_simultaneously_on_different_nodes.py | 185
-rw-r--r--  tests/functional/glusterd/test_profile_start_with_quorum_not_met.py | 14
-rw-r--r--  tests/functional/glusterd/test_quorum_remove_brick.py | 32
-rw-r--r--  tests/functional/glusterd/test_quorum_syslog.py | 7
-rw-r--r--  tests/functional/glusterd/test_readonly_option_on_volume.py | 15
-rw-r--r--  tests/functional/glusterd/test_rebalance_hang.py | 36
-rw-r--r--  tests/functional/glusterd/test_rebalance_new_node.py | 21
-rw-r--r--  tests/functional/glusterd/test_rebalance_spurious.py | 25
-rw-r--r--  tests/functional/glusterd/test_rebalance_start_not_failed_with_socket_path_too_long.py | 173
-rw-r--r--  tests/functional/glusterd/test_rebalance_when_quorum_not_met.py | 154
-rw-r--r--  tests/functional/glusterd/test_remove_brick_after_restart_glusterd.py | 13
-rw-r--r--  tests/functional/glusterd/test_remove_brick_scenarios.py | 6
-rw-r--r--  tests/functional/glusterd/test_remove_brick_when_quorum_not_met.py | 12
-rw-r--r--  tests/functional/glusterd/test_replace_brick_quorum_not_met.py | 19
-rw-r--r--  tests/functional/glusterd/test_reserve_limt_change_while_rebalance.py | 127
-rw-r--r--  tests/functional/glusterd/test_reserved_port_range_for_gluster.py | 152
-rw-r--r--  tests/functional/glusterd/test_restart_glusterd_while_rebalancing.py | 53
-rw-r--r--  tests/functional/glusterd/test_self_heal_quota_daemon_after_reboot.py | 176
-rw-r--r--  tests/functional/glusterd/test_setting_volume_option_when_one_node_is_down_in_cluster.py | 39
-rw-r--r--  tests/functional/glusterd/test_setting_volume_option_with_more_than_4096_characters.py | 46
-rw-r--r--  tests/functional/glusterd/test_shared_storage.py | 247
-rw-r--r--  tests/functional/glusterd/test_status_string_in_volstatus.py | 4
-rw-r--r--  tests/functional/glusterd/test_updates_in_options_file_on_quorum_changes.py | 94
-rw-r--r--  tests/functional/glusterd/test_validate_auth_allow_and_auth_reject.py | 162
-rw-r--r--  tests/functional/glusterd/test_validate_glusterd_info.py | 96
-rwxr-xr-x  tests/functional/glusterd/test_validate_peer_probe_ip_fqdn_hostname.py | 146
-rw-r--r--  tests/functional/glusterd/test_verify_df_output.py | 171
-rw-r--r--  tests/functional/glusterd/test_volume_create.py | 4
-rw-r--r--  tests/functional/glusterd/test_volume_create_with_glusterd_restarts.py | 21
-rw-r--r--  tests/functional/glusterd/test_volume_delete.py | 19
-rw-r--r--  tests/functional/glusterd/test_volume_get.py | 15
-rw-r--r--  tests/functional/glusterd/test_volume_network_ping_timeout.py | 20
-rw-r--r--  tests/functional/glusterd/test_volume_operations.py | 4
-rw-r--r--  tests/functional/glusterd/test_volume_reduce_replica.py | 4
-rw-r--r--  tests/functional/glusterd/test_volume_reset.py | 6
-rw-r--r--  tests/functional/glusterd/test_volume_set_when_glusterd_stopped_on_one_node.py | 193
-rw-r--r--  tests/functional/glusterd/test_volume_set_with_quorum_enabled.py | 28
-rw-r--r--  tests/functional/glusterd/test_volume_status.py | 16
-rw-r--r--  tests/functional/glusterd/test_volume_status_fd.py | 16
-rw-r--r--  tests/functional/glusterd/test_volume_status_show_bricks_online_though_brickpath_deleted.py | 138
-rw-r--r--  tests/functional/glusterd/test_volume_status_with_absent_bricks.py | 84
-rw-r--r--  tests/functional/glusterd/test_volume_status_xml.py | 32
-rw-r--r--  tests/functional/glusterd/test_xml_dump_of_gluster_volume_status_during_rebalance.py | 185
98 files changed, 5962 insertions(+), 679 deletions(-)
diff --git a/tests/functional/glusterd/test_add_brick.py b/tests/functional/glusterd/test_add_brick.py
index c802c79be..b8646098d 100644
--- a/tests/functional/glusterd/test_add_brick.py
+++ b/tests/functional/glusterd/test_add_brick.py
@@ -33,7 +33,7 @@ class TestVolumeCreate(GlusterBaseClass):
def setUpClass(cls):
# Calling GlusterBaseClass setUpClass
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# check whether peers are in connected state
if not cls.validate_peers_are_connected():
@@ -57,7 +57,7 @@ class TestVolumeCreate(GlusterBaseClass):
raise ExecutionError("Failed to delete the brick dirs.")
g.log.info("Successfully cleaned all the brick dirs.")
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_add_brick_functionality(self):
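
A note on the change above: GlusterBaseClass.setUpClass.im_func(cls) relies on
Python 2 unbound methods, which Python 3 removed, so this series switches to
the glustolibs helper cls.get_super_method(cls, 'setUpClass')(). The sketch
below is illustrative only (not the glustolibs source) and shows one way such
a version-agnostic super lookup can be written:

class GlusterBaseClassSketch(object):
    @classmethod
    def setUpClass(cls):
        print("base setUpClass for %s" % cls.__name__)

    @classmethod
    def get_super_method(cls, obj, method_name):
        # Resolve the parent implementation via super(); works for both
        # class-level (setUpClass) and instance-level (tearDown) callers.
        klass = obj if isinstance(obj, type) else obj.__class__
        return getattr(super(klass, obj), method_name)

class TestVolumeCreateSketch(GlusterBaseClassSketch):
    @classmethod
    def setUpClass(cls):
        cls.get_super_method(cls, 'setUpClass')()  # replaces the im_func call

TestVolumeCreateSketch.setUpClass()  # base setUpClass for TestVolumeCreateSketch
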
diff --git a/tests/functional/glusterd/test_add_brick_when_quorum_not_met.py b/tests/functional/glusterd/test_add_brick_when_quorum_not_met.py
index 1e4c782b3..0e0a58842 100644
--- a/tests/functional/glusterd/test_add_brick_when_quorum_not_met.py
+++ b/tests/functional/glusterd/test_add_brick_when_quorum_not_met.py
@@ -20,8 +20,8 @@ from glustolibs.gluster.gluster_base_class import runs_on, GlusterBaseClass
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.volume_libs import setup_volume
from glustolibs.gluster.volume_ops import (set_volume_options,
- get_volume_status,
- volume_reset)
+ volume_reset,
+ get_volume_status)
from glustolibs.gluster.gluster_init import (stop_glusterd, start_glusterd,
is_glusterd_running)
from glustolibs.gluster.brick_libs import get_all_bricks, are_bricks_offline
@@ -56,12 +56,6 @@ class TestAddBrickWhenQuorumNotMet(GlusterBaseClass):
g.log.info("Peers are in connected state")
- # reset quorum ratio to default
- g.log.info("resetting quorum ratio")
- ret, _, _ = volume_reset(self.mnode, 'all')
- self.assertEqual(ret, 0, "Failed to reset quorum ratio")
- g.log.info("Successfully resetted quorum ratio")
-
# stopping the volume and Cleaning up the volume
ret = self.cleanup_volume()
if not ret:
@@ -69,16 +63,15 @@ class TestAddBrickWhenQuorumNotMet(GlusterBaseClass):
% self.volname)
g.log.info("Volume deleted successfully : %s", self.volname)
- # Setting quorum ratio to 51%
- ret = set_volume_options(self.mnode, 'all',
- {'cluster.server-quorum-ratio': '51%'})
+ # Reset Cluster options
+ ret = volume_reset(self.mnode, 'all')
if not ret:
- raise ExecutionError("Failed to set server quorum ratio on %s"
+ raise ExecutionError("Failed to reset cluster options on %s"
% self.volname)
- g.log.info("Able to set server quorum ratio successfully on %s",
+ g.log.info("Cluster options reset successfully on %s",
self.servers)
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_add_brick_when_quorum_not_met(self):
@@ -107,7 +100,7 @@ class TestAddBrickWhenQuorumNotMet(GlusterBaseClass):
# bring down glusterd of half nodes
num_of_servers = len(self.servers)
- num_of_nodes_to_bring_down = num_of_servers/2
+ num_of_nodes_to_bring_down = num_of_servers//2
for node in range(num_of_nodes_to_bring_down, num_of_servers):
ret = stop_glusterd(self.servers[node])
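
Why the '/' to '//' change matters: on Python 3, '/' is true division and
returns a float, and the result here feeds range(), which rejects floats.
Floor division keeps the value an int on both interpreter versions:

num_of_servers = 6
half = num_of_servers / 2    # Python 3: 3.0 (float); range(3.0, 6) raises TypeError
half = num_of_servers // 2   # 3 on both Python 2 and Python 3
print(list(range(half, num_of_servers)))  # [3, 4, 5]
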
diff --git a/tests/functional/glusterd/test_add_identical_brick_new_node.py b/tests/functional/glusterd/test_add_identical_brick_new_node.py
index 240436d88..849894943 100644
--- a/tests/functional/glusterd/test_add_identical_brick_new_node.py
+++ b/tests/functional/glusterd/test_add_identical_brick_new_node.py
@@ -41,7 +41,7 @@ class TestAddIdenticalBrick(GlusterBaseClass):
if ret != 0:
raise ExecutionError("Peer detach failed")
g.log.info("Peer detach SUCCESSFUL.")
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
def tearDown(self):
"""
@@ -65,7 +65,7 @@ class TestAddIdenticalBrick(GlusterBaseClass):
"servers %s" % self.servers)
g.log.info("Peer probe success for detached "
"servers %s", self.servers)
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_add_identical_brick(self):
"""
@@ -115,8 +115,14 @@ class TestAddIdenticalBrick(GlusterBaseClass):
# Replace just host IP to create identical brick
add_bricks = []
- add_bricks.append(string.replace(bricks_list[0],
- self.servers[0], self.servers[1]))
+ try:
+ add_bricks.append(string.replace(bricks_list[0],
+ self.servers[0],
+ self.servers[1]))
+ except AttributeError:
+ add_bricks.append(str.replace(bricks_list[0],
+ self.servers[0],
+ self.servers[1]))
ret, _, _ = add_brick(self.mnode, self.volname, add_bricks)
self.assertEqual(ret, 0, "Failed to add the bricks to the volume")
g.log.info("Successfully added bricks to volume %s", add_bricks[0])
diff --git a/tests/functional/glusterd/test_brick_log_messages.py b/tests/functional/glusterd/test_brick_log_messages.py
index d1f8f2fb5..1313fbe14 100644
--- a/tests/functional/glusterd/test_brick_log_messages.py
+++ b/tests/functional/glusterd/test_brick_log_messages.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2018-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -19,8 +19,8 @@
from mountpoint
"""
-
from glusto.core import Glusto as g
+
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.brick_libs import get_all_bricks
@@ -35,16 +35,14 @@ class TestAddBrickFunctionality(GlusterBaseClass):
@classmethod
def setUpClass(cls):
cls.counter = 1
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# Uploading file_dir script in all client directories
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts to clients %s"
% cls.clients)
@@ -56,7 +54,7 @@ class TestAddBrickFunctionality(GlusterBaseClass):
setUp method for every test
"""
# calling GlusterBaseClass setUp
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
# Creating Volume
ret = self.setup_volume_and_mount_volume(self.mounts)
@@ -78,7 +76,7 @@ class TestAddBrickFunctionality(GlusterBaseClass):
g.log.info("Volume deleted successfully : %s", self.volname)
# Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_brick_log_messages(self):
'''
@@ -103,7 +101,7 @@ class TestAddBrickFunctionality(GlusterBaseClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 2 "
"--dir-length 5 "
diff --git a/tests/functional/glusterd/test_brick_port_after_stop_glusterd_modify_volume.py b/tests/functional/glusterd/test_brick_port_after_stop_glusterd_modify_volume.py
index e44514aff..865230d1a 100644
--- a/tests/functional/glusterd/test_brick_port_after_stop_glusterd_modify_volume.py
+++ b/tests/functional/glusterd/test_brick_port_after_stop_glusterd_modify_volume.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -13,8 +13,8 @@
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
from time import sleep
+
from glusto.core import Glusto as g
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.volume_ops import (volume_create, volume_start,
@@ -23,14 +23,14 @@ from glustolibs.gluster.volume_ops import (volume_create, volume_start,
from glustolibs.gluster.brick_libs import get_all_bricks
from glustolibs.gluster.volume_libs import (cleanup_volume)
from glustolibs.gluster.peer_ops import (peer_probe, peer_detach,
- is_peer_connected,
peer_probe_servers,
peer_detach_servers,
- nodes_from_pool_list)
+ nodes_from_pool_list,
+ wait_for_peers_to_connect)
from glustolibs.gluster.lib_utils import form_bricks_list
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_init import (start_glusterd, stop_glusterd,
- is_glusterd_running)
+ wait_for_glusterd_to_start)
@runs_on([['distributed'], ['glusterfs']])
@@ -44,7 +44,7 @@ class TestBrickPortAfterModifyVolume(GlusterBaseClass):
raise ExecutionError("Failed to probe detached "
"servers %s" % self.servers)
g.log.info("Peer detach SUCCESSFUL.")
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
def tearDown(self):
"""
@@ -68,7 +68,7 @@ class TestBrickPortAfterModifyVolume(GlusterBaseClass):
"servers %s" % self.servers)
g.log.info("Peer probe success for detached "
"servers %s", self.servers)
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_brick_port(self):
# pylint: disable=too-many-statements, too-many-branches
@@ -131,14 +131,8 @@ class TestBrickPortAfterModifyVolume(GlusterBaseClass):
ret = stop_glusterd(self.servers[1])
self.assertTrue(ret, "Failed to stop glusterd on one of the node")
- count = 0
- while count < 60:
- ret = is_glusterd_running(self.servers[1])
- if ret:
- break
- sleep(2)
- count += 1
- self.assertEqual(ret, 1, "glusterd is still running on %s"
+ ret = wait_for_glusterd_to_start(self.servers[1])
+ self.assertFalse(ret, "glusterd is still running on %s"
% self.servers[1])
g.log.info("Glusterd stop on the nodes : %s "
"succeeded", self.servers[1])
@@ -155,29 +149,18 @@ class TestBrickPortAfterModifyVolume(GlusterBaseClass):
self.assertTrue(ret, "Failed to start glusterd on one of the node")
g.log.info("Glusterd start on the nodes : %s "
"succeeded", self.servers[1])
- count = 0
- while count < 60:
- ret = is_glusterd_running(self.servers[1])
- if not ret:
- break
- sleep(2)
- count += 1
-
- self.assertEqual(ret, 0, "glusterd is not running on %s"
- % self.servers[1])
+ ret = wait_for_glusterd_to_start(self.servers[1])
+ self.assertTrue(ret, "glusterd is not running on %s"
+ % self.servers[1])
g.log.info("Glusterd start on the nodes : %s "
"succeeded", self.servers[1])
- count = 0
- while count < 60:
- ret = is_peer_connected(self.servers[0], self.servers[1])
- if ret:
- break
- sleep(2)
- count += 1
- self.assertEqual(ret, 1, "glusterd is not connected %s with peer %s"
- % (self.servers[0], self.servers[1]))
+ ret = wait_for_peers_to_connect(self.servers[0], self.servers[1])
+ self.assertTrue(ret, "glusterd is not connected %s with peer %s"
+ % (self.servers[0], self.servers[1]))
+        # Wait for 5 seconds so that the brick gets its port assigned
+ sleep(5)
vol_status = get_volume_status(self.mnode, self.volname)
self.assertIsNotNone(vol_status, "Failed to get volume "
"status for %s" % self.volname)
diff --git a/tests/functional/glusterd/test_brick_status_when_quorum_not_met.py b/tests/functional/glusterd/test_brick_status_when_quorum_not_met.py
index b52bd0beb..2679bebee 100644
--- a/tests/functional/glusterd/test_brick_status_when_quorum_not_met.py
+++ b/tests/functional/glusterd/test_brick_status_when_quorum_not_met.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2018-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -23,11 +23,13 @@ from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.volume_ops import set_volume_options
-from glustolibs.gluster.gluster_init import (stop_glusterd, start_glusterd,
- is_glusterd_running)
+from glustolibs.gluster.gluster_init import (
+ is_glusterd_running, start_glusterd, stop_glusterd)
from glustolibs.gluster.brick_libs import (are_bricks_offline,
get_all_bricks)
from glustolibs.gluster.volume_ops import get_volume_status
+from glustolibs.gluster.peer_ops import (
+ peer_probe_servers, is_peer_connected, wait_for_peers_to_connect)
@runs_on([['distributed-replicated'], ['glusterfs']])
@@ -38,7 +40,7 @@ class TestBrickStatusWhenQuorumNotMet(GlusterBaseClass):
setUp method for every test
"""
# calling GlusterBaseClass setUp
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
# Creating Volume
ret = self.setup_volume()
@@ -54,27 +56,19 @@ class TestBrickStatusWhenQuorumNotMet(GlusterBaseClass):
if ret:
ret = start_glusterd(self.servers)
if not ret:
- raise ExecutionError("Glusterd not started on some of "
- "the servers")
- # checking for peer status from every node
- count = 0
- while count < 80:
- ret = self.validate_peers_are_connected()
- if ret:
- break
- sleep(2)
- count += 1
+ raise ExecutionError("Failed to start glusterd on %s"
+ % self.servers)
+            # Restarted glusterd takes ~5 seconds to reach peer connected state
+ sleep(5)
+ g.log.info("Glusterd started successfully on %s", self.servers)
+ # checking for peer status from every node
+ ret = is_peer_connected(self.mnode, self.servers)
if not ret:
- raise ExecutionError("Servers are not in peer probed state")
-
- # Setting quorum ratio to 51%
- ret = set_volume_options(self.mnode, 'all',
- {'cluster.server-quorum-ratio': '51%'})
- self.assertTrue(ret, "Failed to set quorum ratio to 51 percentage on "
- "servers %s" % self.servers)
- g.log.info("Able to set server quorum ratio to 51 percentage "
- "on servers %s", self.servers)
+ ret = peer_probe_servers(self.mnode, self.servers)
+ if not ret:
+ raise ExecutionError("Failed to peer probe failed in "
+ "servers %s" % self.servers)
# stopping the volume and Cleaning up the volume
ret = self.cleanup_volume()
@@ -83,7 +77,7 @@ class TestBrickStatusWhenQuorumNotMet(GlusterBaseClass):
g.log.info("Volume deleted successfully : %s", self.volname)
# Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_remove_brick_status(self):
'''
@@ -138,12 +132,16 @@ class TestBrickStatusWhenQuorumNotMet(GlusterBaseClass):
g.log.info("Glusterd started successfully on all servers except "
"last node %s", self.servers[1:5])
+ self.assertTrue(
+ wait_for_peers_to_connect(self.mnode, self.servers[1:5]),
+ "Peers are not in connected state")
+
# Verifying node count in volume status after glusterd
# started on servers. It's not possible to check the brick status
# immediately after glusterd starts, so instead verify that all
# nodes with glusterd running appear in gluster volume status
count = 0
- while count < 50:
+ while count < 200:
vol_status = get_volume_status(self.mnode, self.volname)
servers_count = len(vol_status[self.volname].keys())
if servers_count == 5:
diff --git a/tests/functional/glusterd/test_bricks_online_after_node_reboot.py b/tests/functional/glusterd/test_bricks_online_after_node_reboot.py
new file mode 100644
index 000000000..87f6301a3
--- /dev/null
+++ b/tests/functional/glusterd/test_bricks_online_after_node_reboot.py
@@ -0,0 +1,178 @@
+# Copyright (C) 2019-2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Test Cases in this module related to gluster bricks are online
+after node reboot or not
+"""
+from random import choice
+from time import sleep
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.volume_ops import (volume_start, volume_stop,
+ volume_create, set_volume_options,
+ get_volume_list)
+from glustolibs.gluster.gluster_init import is_glusterd_running
+from glustolibs.gluster.volume_libs import cleanup_volume
+from glustolibs.gluster.brick_libs import wait_for_bricks_to_be_online
+from glustolibs.gluster.lib_utils import form_bricks_list
+from glustolibs.misc.misc_libs import reboot_nodes_and_wait_to_come_online
+
+
+@runs_on([['distributed-dispersed'], ['glusterfs']])
+class BricksOnlineAfterNodeReboot(GlusterBaseClass):
+ def setUp(self):
+ """
+ setUp method for every test
+ """
+ # calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+
+ # Creating Volume
+ g.log.info("Started creating volume")
+ ret = self.setup_volume()
+ if ret:
+ g.log.info("Volme created successfully : %s", self.volname)
+ else:
+ raise ExecutionError("Volume creation failed: %s" % self.volname)
+
+ def tearDown(self):
+ """
+ tearDown for every test
+ """
+ # Cleaning up the volume
+ volume_list = get_volume_list(choice(self.servers))
+ for volume in volume_list:
+ ret = cleanup_volume(self.mnode, volume)
+ if not ret:
+ raise ExecutionError("Failed Cleanup the Volume %s" % volume)
+ g.log.info("Successfully cleaned up all the volumes")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def check_bricks_online(self, all_volumes):
+ for volume in all_volumes:
+ self.assertTrue(wait_for_bricks_to_be_online(
+ self.mnode, volume), "Unexpected: Few bricks are offline")
+ g.log.info("All bricks are online in the volume %s ", volume)
+
+ def check_node_after_reboot(self, server):
+ count = 0
+ while count < 80:
+ ret = is_glusterd_running(server)
+ if not ret:
+ ret = self.validate_peers_are_connected()
+ if ret:
+ g.log.info("glusterd is running and all peers are in"
+ "connected state")
+ break
+ count += 1
+ sleep(10)
+        self.assertNotEqual(count, 80, "Either glusterd is not running or"
+                            " peers are not in connected state")
+
+ def test_bricks_online_after_node_reboot(self):
+ '''
+ Create all types of volumes
+ Start the volume and check the bricks are online
+ Reboot a node at random
+ After the node is up check the bricks are online
+ Set brick-mux to on
+ stop and start the volume to get the brick-mux into effect
+ Check all bricks are online
+ Now perform node reboot
+ After node reboot all bricks should be online
+ '''
+
+ # Creating all types of volumes disperse, replicate, arbiter
+ all_volumes = ['disperse', 'replicate', 'arbiter']
+ for volume in all_volumes:
+ bricks_list = form_bricks_list(self.mnode, volume,
+ 6 if volume == "disperse" else 3,
+ self.servers,
+ self.all_servers_info)
+ if volume == "disperse":
+ ret, _, _ = volume_create(self.mnode, volume, bricks_list,
+ disperse_count=6,
+ redundancy_count=2)
+ elif volume == "replicate":
+ ret, _, _ = volume_create(self.mnode, volume, bricks_list,
+ replica_count=3)
+ else:
+ ret, _, _ = volume_create(self.mnode, volume, bricks_list,
+ replica_count=3, arbiter_count=1)
+ self.assertEqual(ret, 0, "Unexpected: Volume create '%s' failed"
+ % volume)
+ g.log.info("volume create %s succeeded", volume)
+ # All volumes start
+ for volume in all_volumes:
+ ret, _, _ = volume_start(self.mnode, volume)
+ self.assertEqual(ret, 0, "Unexpected: Volume start succeded %s"
+ % volume)
+ g.log.info("Volume started succesfully %s", volume)
+
+ # Adding self.volname to the all_volumes list
+ all_volumes.append(self.volname)
+
+ # Validate whether all volume bricks are online or not
+ self.check_bricks_online(all_volumes)
+ # Perform node reboot
+ random_server = choice(self.servers)
+ ret, _ = reboot_nodes_and_wait_to_come_online(random_server)
+ self.assertTrue(ret, "Reboot Failed on node %s" % random_server)
+ g.log.info("Node: %s rebooted successfully", random_server)
+
+ # Wait till glusterd is started on the node rebooted
+ self.check_node_after_reboot(random_server)
+
+ # After reboot check bricks are online
+ self.check_bricks_online(all_volumes)
+
+ # Enable brick-mux on and stop and start the volumes
+ ret = set_volume_options(self.mnode, 'all',
+ {"cluster.brick-multiplex": "enable"})
+ self.assertTrue(ret, "Unable to set the volume option")
+ g.log.info("Brick-mux option enabled successfully")
+ self.addCleanup(set_volume_options, self.mnode, 'all',
+ {"cluster.brick-multiplex": "disable"})
+
+ # Stop all the volumes in the cluster
+ for vol in all_volumes:
+ ret, _, _ = volume_stop(self.mnode, vol)
+ self.assertEqual(ret, 0, "volume stop failed on %s" % vol)
+ g.log.info("volume: %s stopped successfully", vol)
+
+ # Starting the volume to get brick-mux into effect
+ for vol in all_volumes:
+ ret, _, _ = volume_start(self.mnode, vol)
+ self.assertEqual(ret, 0, "volume start failed on %s" % vol)
+ g.log.info("volume: %s started successfully", vol)
+
+ # Checking all bricks are online or not
+ self.check_bricks_online(all_volumes)
+
+ # Perform node reboot
+ ret, _ = reboot_nodes_and_wait_to_come_online(random_server)
+ self.assertTrue(ret, "Reboot Failed on node %s" % random_server)
+ g.log.info("Node: %s rebooted successfully", random_server)
+
+ # Wait till glusterd is started on the node rebooted
+ self.check_node_after_reboot(random_server)
+
+ # Validating bricks are online after node reboot
+ self.check_bricks_online(all_volumes)
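
A side note on the stop/start cycle in this test: brick multiplexing applies
only to bricks (re)started after cluster.brick-multiplex is set, which is why
every volume is stopped and started before re-checking. One hedged way to
observe the effect, assuming root access and pgrep on the node (both
illustrative assumptions, not part of the test above):

from glusto.core import Glusto as g

def glusterfsd_count(node):
    # pgrep -c prints the number of matching processes; with multiplexing
    # enabled, compatible bricks share one glusterfsd, so this count
    # should drop after the restart cycle.
    ret, out, _ = g.run(node, "pgrep -c glusterfsd")
    return int(out) if ret == 0 else 0
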
diff --git a/tests/functional/glusterd/test_change_reserve_limit.py b/tests/functional/glusterd/test_change_reserve_limit.py
index 07aea98ae..fb6c09edd 100644
--- a/tests/functional/glusterd/test_change_reserve_limit.py
+++ b/tests/functional/glusterd/test_change_reserve_limit.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2019 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2019-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -15,6 +15,7 @@
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from glusto.core import Glusto as g
+
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.misc.misc_libs import upload_scripts
@@ -24,7 +25,6 @@ from glustolibs.gluster.mount_ops import mount_volume, umount_volume
from glustolibs.io.utils import validate_io_procs
from glustolibs.gluster.brick_libs import get_all_bricks
from glustolibs.gluster.brick_ops import remove_brick
-from glustolibs.gluster.glusterdir import rmdir
@runs_on([['distributed-replicated'], ['glusterfs']])
@@ -33,16 +33,14 @@ class TestChangeReservcelimit(GlusterBaseClass):
@classmethod
def setUpClass(cls):
cls.counter = 1
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# Uploading file_dir script in all client directories
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts to "
"clients %s" % cls.clients)
@@ -51,25 +49,12 @@ class TestChangeReservcelimit(GlusterBaseClass):
def tearDown(self):
- # Setting storage.reserve to Default
- ret = set_volume_options(self.mnode, self.volname,
- {'storage.reserve': '0'})
- if not ret:
- raise ExecutionError("Failed to reset storage reserve on %s"
- % self.mnode)
- g.log.info("Able to reset storage reserve successfully on %s",
- self.mnode)
-
# Unmounting the volume.
ret, _, _ = umount_volume(mclient=self.mounts[0].client_system,
mpoint=self.mounts[0].mountpoint)
if ret:
raise ExecutionError("Volume %s is not unmounted" % self.volname)
g.log.info("Volume unmounted successfully : %s", self.volname)
- ret = rmdir(self.mounts[0].client_system, self.mounts[0].mountpoint)
- if not ret:
- raise ExecutionError("Failed to remove directory mount directory.")
- g.log.info("Mount directory is removed successfully")
# clean up all volumes
vol_list = get_volume_list(self.mnode)
@@ -91,7 +76,7 @@ class TestChangeReservcelimit(GlusterBaseClass):
raise ExecutionError("Failed to delete the brick "
"dir's of deleted volume")
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_change_reserve_limit_to_lower_value(self):
@@ -134,7 +119,7 @@ class TestChangeReservcelimit(GlusterBaseClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 2 "
"--dir-length 5 "
@@ -222,7 +207,7 @@ class TestChangeReservcelimit(GlusterBaseClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 2 "
"--dir-length 5 "
diff --git a/tests/functional/glusterd/test_change_reserve_limit_to_wrong_values.py b/tests/functional/glusterd/test_change_reserve_limit_to_wrong_values.py
new file mode 100644
index 000000000..334639e7c
--- /dev/null
+++ b/tests/functional/glusterd/test_change_reserve_limit_to_wrong_values.py
@@ -0,0 +1,80 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import string
+from random import choice
+
+from glusto.core import Glusto as g
+
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.volume_ops import set_volume_options
+
+
+@runs_on([['distributed-replicated'], ['glusterfs']])
+class TestChangeReserveLimit(GlusterBaseClass):
+ """
+ Test to validate behaviour of 'storage.reserve' option on supplying
+ erroneous values.
+ """
+ def setUp(self):
+ self.get_super_method(self, 'setUp')()
+ ret = self.setup_volume()
+ if not ret:
+ raise ExecutionError("Failed to create the volume")
+ g.log.info("Created volume successfully")
+
+ def tearDown(self):
+ ret = self.cleanup_volume()
+ if not ret:
+ raise ExecutionError("Failed to cleanup the volume")
+ g.log.info("Successfully cleaned the volume")
+ self.get_super_method(self, 'tearDown')()
+
+ @staticmethod
+ def get_random_string(chars, str_len=4):
+ return ''.join((choice(chars) for _ in range(str_len)))
+
+ def test_change_reserve_limit_to_wrong_value(self):
+ """
+ Test Case:
+ 1) Create and start a distributed-replicated volume.
+ 2) Give different inputs to the storage.reserve volume set options
+ 3) Validate the command behaviour on wrong inputs
+ """
+
+ # Creation of random data for storage.reserve volume option
+        # Data includes letters, punctuation and other printable characters
+ key = 'storage.reserve'
+
+ for char_type in (string.ascii_letters, string.punctuation,
+ string.printable):
+
+            # Remove quote and ampersand characters from the generated string
+ temp_val = self.get_random_string(char_type)
+ temp_val = temp_val.replace("'", "").replace("&", "")
+ value = "'{}'".format(temp_val)
+ ret = set_volume_options(self.mnode, self.volname, {key: value})
+ self.assertFalse(
+ ret, "Unexpected: Erroneous value {}, to option "
+ "{} should result in failure".format(value, key))
+
+ # Passing an out of range value
+ for value in ('-1%', '-101%', '101%', '-1', '-101'):
+ ret = set_volume_options(self.mnode, self.volname, {key: value})
+ self.assertFalse(
+ ret, "Unexpected: Erroneous value {}, to option "
+ "{} should result in failure".format(value, key))
diff --git a/tests/functional/glusterd/test_concurrent_set.py b/tests/functional/glusterd/test_concurrent_set.py
index 4b432b784..72b8a1509 100644
--- a/tests/functional/glusterd/test_concurrent_set.py
+++ b/tests/functional/glusterd/test_concurrent_set.py
@@ -28,7 +28,7 @@ from glustolibs.gluster.lib_utils import (form_bricks_list,
class TestConcurrentSet(GlusterBaseClass):
@classmethod
def setUpClass(cls):
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
g.log.info("Starting %s ", cls.__name__)
ret = cls.validate_peers_are_connected()
if not ret:
@@ -44,7 +44,7 @@ class TestConcurrentSet(GlusterBaseClass):
self.assertTrue(ret, "Failed to Cleanup the Volume %s" % volume)
g.log.info("Volume deleted successfully : %s", volume)
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_concurrent_set(self):
# time stamp of current test case
diff --git a/tests/functional/glusterd/test_create_vol_with_used_bricks.py b/tests/functional/glusterd/test_create_vol_with_used_bricks.py
index e37741d56..a5f94c077 100644
--- a/tests/functional/glusterd/test_create_vol_with_used_bricks.py
+++ b/tests/functional/glusterd/test_create_vol_with_used_bricks.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2018-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -19,6 +19,7 @@
"""
from glusto.core import Glusto as g
+
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.brick_ops import add_brick
@@ -37,16 +38,14 @@ class TestCreateVolWithUsedBricks(GlusterBaseClass):
@classmethod
def setUpClass(cls):
cls.counter = 1
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# Uploading file_dir script in all client directories
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts to "
"clients %s" % cls.clients)
@@ -78,7 +77,7 @@ class TestCreateVolWithUsedBricks(GlusterBaseClass):
"dir's of deleted volume")
# Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_create_vol_used_bricks(self):
'''
@@ -107,12 +106,14 @@ class TestCreateVolWithUsedBricks(GlusterBaseClass):
g.log.info("Bricks added successfully to the volume %s", self.volname)
# Mounting the volume.
- ret, _, _ = mount_volume(self.volname, mtype=self.mount_type,
- mpoint=self.mounts[0].mountpoint,
- mserver=self.mnode,
- mclient=self.mounts[0].client_system)
- self.assertEqual(ret, 0, ("Volume %s is not mounted") % self.volname)
- g.log.info("Volume mounted successfully : %s", self.volname)
+ for mount_obj in self.mounts:
+ ret, _, _ = mount_volume(self.volname, mtype=self.mount_type,
+ mpoint=mount_obj.mountpoint,
+ mserver=self.mnode,
+ mclient=mount_obj.client_system)
+ self.assertEqual(ret, 0, ("Volume %s is not mounted") % (
+ self.volname))
+ g.log.info("Volume mounted successfully : %s", self.volname)
# run IOs
g.log.info("Starting IO on all mounts...")
@@ -120,11 +121,9 @@ class TestCreateVolWithUsedBricks(GlusterBaseClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
- "--dirname-start-num %d "
- "--dir-depth 2 "
- "--dir-length 5 "
- "--max-num-of-dirs 3 "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
+ "--dirname-start-num %d --dir-depth 2 "
+ "--dir-length 5 --max-num-of-dirs 3 "
"--num-of-files 10 %s" % (self.script_upload_path,
self.counter,
mount_obj.mountpoint))
@@ -137,14 +136,15 @@ class TestCreateVolWithUsedBricks(GlusterBaseClass):
# Validate IO
self.assertTrue(
validate_io_procs(self.all_mounts_procs, self.mounts),
- "IO failed on some of the clients"
- )
+ "IO failed on some of the clients")
# Unmouting the volume.
- ret, _, _ = umount_volume(mclient=self.mounts[0].client_system,
- mpoint=self.mounts[0].mountpoint)
- self.assertEqual(ret, 0, ("Volume %s is not unmounted") % self.volname)
- g.log.info("Volume unmounted successfully : %s", self.volname)
+ for mount_obj in self.mounts:
+ ret, _, _ = umount_volume(mclient=mount_obj.client_system,
+ mpoint=mount_obj.mountpoint)
+ self.assertEqual(ret, 0, "Volume %s is not unmounted" % (
+ self.volname))
+ g.log.info("Volume unmounted successfully : %s", self.volname)
# Getting brick list
self.brick_list = get_all_bricks(self.mnode, self.volname)
diff --git a/tests/functional/glusterd/test_default_log_level_of_cli.py b/tests/functional/glusterd/test_default_log_level_of_cli.py
new file mode 100644
index 000000000..76ad06d66
--- /dev/null
+++ b/tests/functional/glusterd/test_default_log_level_of_cli.py
@@ -0,0 +1,97 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+""" Description:
+ Test to check that default log level of CLI should be INFO
+"""
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.volume_ops import (volume_start, volume_status,
+ volume_info, volume_stop)
+
+
+@runs_on([['distributed', 'replicated', 'distributed-replicated',
+ 'dispersed', 'distributed-dispersed', 'arbiter',
+ 'distributed-arbiter'], ['glusterfs']])
+class TestDefaultLogLevelOfCLI(GlusterBaseClass):
+ def setUp(self):
+ # Calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+
+ # Creating and starting the volume
+ ret = self.setup_volume()
+ if not ret:
+ raise ExecutionError("Volume creation/start operation"
+ " failed: %s" % self.volname)
+ g.log.info("Volme created and started successfully : %s", self.volname)
+
+ def tearDown(self):
+ # Stopping the volume and Cleaning up the volume
+ ret = self.cleanup_volume()
+ if not ret:
+ raise ExecutionError("Failed to cleanup volume")
+ g.log.info("Volume deleted successfully : %s", self.volname)
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def test_default_log_level_of_cli(self):
+ """
+ Test Case:
+ 1) Create and start a volume
+ 2) Run volume info command
+ 3) Run volume status command
+ 4) Run volume stop command
+ 5) Run volume start command
+ 6) Check the default log level of cli.log
+ """
+ # Check volume info operation
+ ret, _, _ = volume_info(self.mnode)
+ self.assertEqual(ret, 0, "Failed to execute volume info"
+ " command on node: %s" % self.mnode)
+ g.log.info("Successfully executed the volume info command on"
+ " node: %s", self.mnode)
+
+ # Check volume status operation
+ ret, _, _ = volume_status(self.mnode)
+ self.assertEqual(ret, 0, "Failed to execute volume status command"
+ " on node: %s" % self.mnode)
+ g.log.info("Successfully executed the volume status command"
+ " on node: %s", self.mnode)
+
+ # Check volume stop operation
+ ret, _, _ = volume_stop(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "Failed to stop the volume %s on node: %s"
+ % (self.volname, self.mnode))
+ g.log.info("Successfully stopped the volume %s on node: %s",
+ self.volname, self.mnode)
+
+ # Check volume start operation
+ ret, _, _ = volume_start(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "Failed to start the volume %s on node: %s"
+ % (self.volname, self.mnode))
+ g.log.info("Successfully started the volume %s on node: %s",
+ self.volname, self.mnode)
+
+ # Check the default log level of cli.log
+ cmd = 'cat /var/log/glusterfs/cli.log | grep -F "] D [" | wc -l'
+ ret, out, _ = g.run(self.mnode, cmd)
+ self.assertEqual(ret, 0, "Failed to execute the command")
+ self.assertEqual(int(out), 0, "Unexpected: Default log level of "
+ "cli.log is not INFO")
+ g.log.info("Default log level of cli.log is INFO as expected")
diff --git a/tests/functional/glusterd/test_default_max_bricks_per_process.py b/tests/functional/glusterd/test_default_max_bricks_per_process.py
new file mode 100644
index 000000000..b20c1bccd
--- /dev/null
+++ b/tests/functional/glusterd/test_default_max_bricks_per_process.py
@@ -0,0 +1,100 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+""" Description:
+ Default max bricks per-process should be 250
+"""
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.volume_ops import (get_volume_options,
+ reset_volume_option,
+ set_volume_options)
+
+
+@runs_on([['distributed', 'replicated', 'distributed-replicated',
+ 'dispersed', 'distributed-dispersed', 'arbiter',
+ 'distributed-arbiter'], ['glusterfs']])
+class TestDefaultMaxBricksPerProcess(GlusterBaseClass):
+ def setUp(self):
+ # calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+
+ ret = self.setup_volume()
+ if not ret:
+ raise ExecutionError("Volume creation failed: %s"
+ % self.volname)
+ g.log.info("Volume created successfully : %s", self.volname)
+
+ def tearDown(self):
+ # Cleaning up the volume
+ ret = self.cleanup_volume()
+ if not ret:
+ raise ExecutionError("Failed to cleanup the volume %s"
+ % self.volname)
+ g.log.info("Volume deleted successfully: %s", self.volname)
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def test_default_max_bricks_per_process(self):
+ """
+ Test Case:
+ 1) Create a volume and start it.
+ 2) Fetch the max bricks per process value
+ 3) Reset the volume options
+ 4) Fetch the max bricks per process value
+ 5) Compare the value fetched in last step with the initial value
+ 6) Enable brick-multiplexing in the cluster
+ 7) Fetch the max bricks per process value
+ 8) Compare the value fetched in last step with the initial value
+ """
+ # Fetch the max bricks per process value
+ ret = get_volume_options(self.mnode, 'all')
+ self.assertIsNotNone(ret, "Failed to execute the volume get command")
+ initial_value = ret['cluster.max-bricks-per-process']
+ g.log.info("Successfully fetched the max bricks per-process value")
+
+ # Reset the volume options
+ ret, _, _ = reset_volume_option(self.mnode, 'all', 'all')
+ self.assertEqual(ret, 0, "Failed to reset the volumes")
+ g.log.info("Volumes reset was successful")
+
+ # Fetch the max bricks per process value
+ ret = get_volume_options(self.mnode, 'all')
+ self.assertIsNotNone(ret, "Failed to execute the volume get command")
+
+ # Comparing the values
+ second_value = ret['cluster.max-bricks-per-process']
+ self.assertEqual(initial_value, second_value, "Unexpected: Max"
+ " bricks per-process value is not equal")
+
+ # Enable brick-multiplex in the cluster
+ ret = set_volume_options(self.mnode, 'all',
+ {'cluster.brick-multiplex': 'enable'})
+ self.assertTrue(ret, "Failed to enable brick-multiplex"
+ " for the cluster")
+ g.log.info("Successfully enabled brick-multiplex in the cluster")
+
+ # Fetch the max bricks per process value
+ ret = get_volume_options(self.mnode, 'all')
+ self.assertIsNotNone(ret, "Failed to execute the volume get command")
+
+ # Comparing the values
+ third_value = ret['cluster.max-bricks-per-process']
+ self.assertEqual(initial_value, third_value, "Unexpected: Max bricks"
+ " per-process value is not equal")
diff --git a/tests/functional/glusterd/test_default_ping_timer_and_epoll_thread_count.py b/tests/functional/glusterd/test_default_ping_timer_and_epoll_thread_count.py
new file mode 100644
index 000000000..4ffe047d3
--- /dev/null
+++ b/tests/functional/glusterd/test_default_ping_timer_and_epoll_thread_count.py
@@ -0,0 +1,87 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+""" Description:
+ Tests to check by default ping timer is disabled and epoll
+ thread count is 1
+"""
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass
+
+
+class TestPingTimerAndEpollThreadCountDefaultValue(GlusterBaseClass):
+ def tearDown(self):
+        # Removing the test script created during the test
+ cmd = "rm -f test.sh;"
+ ret, _, _ = g.run(self.mnode, cmd)
+ self.assertEqual(ret, 0, "Failed to remove the test script")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+    def test_ping_timer_disabled_and_epoll_thread_count_default_value(self):
+ """
+ Test Steps:
+ 1. Start glusterd
+ 2. Check ping timeout value in glusterd.vol should be 0
+ 3. Create a test script for epoll thread count
+ 4. Source the test script
+ 5. Fetch the pid of glusterd
+ 6. Check epoll thread count of glusterd should be 1
+ """
+ # Fetch the ping timeout value from glusterd.vol file
+ cmd = "cat /etc/glusterfs/glusterd.vol | grep -i ping-timeout"
+ ret, out, _ = g.run(self.mnode, cmd)
+ self.assertEqual(ret, 0, "Failed to get ping-timeout value from"
+ " glusterd.vol file")
+
+ # Check if the default value is 0
+ self.ping_value = out.split("ping-timeout")
+ self.ping_value[1] = (self.ping_value[1]).strip()
+ self.assertEqual(int(self.ping_value[1]), 0, "Unexpected: Default"
+ " value of ping-timeout is not 0")
+
+ # Shell Script to be run for epoll thread count
+ script = """
+ #!/bin/bash
+ function nepoll ()
+ {
+ local pid=$1;
+ for i in $(ls /proc/$pid/task);
+ do
+ cat /proc/$pid/task/$i/stack | grep -i 'sys_epoll_wait';
+ done
+ }
+ """
+
+        # Write the shell script to a file on the node
+ cmd = "echo '{}' > test.sh;".format(script)
+ ret, _, _ = g.run(self.mnode, cmd)
+ self.assertEqual(ret, 0, "Failed to create the file with the script")
+
+ # Fetch the pid of glusterd
+ cmd = "pidof glusterd"
+ ret, pidof_glusterd, _ = g.run(self.mnode, cmd)
+ self.assertEqual(ret, 0, "Failed to get the pid of glusterd")
+ pidof_glusterd = int(pidof_glusterd)
+
+ # Check the epoll thread count of glusterd
+ cmd = "source test.sh; nepoll %d | wc -l" % pidof_glusterd
+ ret, count, _ = g.run(self.mnode, cmd)
+ self.assertEqual(ret, 0, "Failed to get the epoll thread count")
+ self.assertEqual(int(count), 1, "Unexpected: Default epoll thread"
+ "count is not 1")
diff --git a/tests/functional/glusterd/test_detach_node_used_to_mount.py b/tests/functional/glusterd/test_detach_node_used_to_mount.py
index 330899b5c..bbc2acc77 100644
--- a/tests/functional/glusterd/test_detach_node_used_to_mount.py
+++ b/tests/functional/glusterd/test_detach_node_used_to_mount.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2019 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2019-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -14,7 +14,6 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-from time import sleep
from random import randint
from glusto.core import Glusto as g
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
@@ -26,7 +25,7 @@ from glustolibs.gluster.rebalance_ops import (rebalance_start,
rebalance_stop)
from glustolibs.gluster.peer_ops import (peer_detach,
peer_probe,
- is_peer_connected)
+ wait_for_peers_to_connect)
from glustolibs.gluster.brick_ops import add_brick
from glustolibs.gluster.mount_ops import mount_volume, umount_volume
from glustolibs.gluster.glusterfile import (get_fattr, file_exists,
@@ -39,7 +38,7 @@ class TestChangeReservcelimit(GlusterBaseClass):
@classmethod
def setUpClass(cls):
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# Override Volumes setup
cls.volume['voltype'] = {
@@ -66,12 +65,10 @@ class TestChangeReservcelimit(GlusterBaseClass):
g.log.info("Peer probe successful %s", self.servers[4])
# Wait till peers are in connected state
- count = 0
- while count < 60:
- ret = is_peer_connected(self.mnode, self.servers)
- if ret:
- break
- sleep(3)
+ for server in self.servers:
+ ret = wait_for_peers_to_connect(self.mnode, server)
+ self.assertTrue(ret, "glusterd is not connected %s with peer %s"
+ % (self.mnode, server))
# Unmounting and cleaning volume
ret, _, _ = umount_volume(mclient=self.mounts[0].client_system,
@@ -84,7 +81,7 @@ class TestChangeReservcelimit(GlusterBaseClass):
if not ret:
raise ExecutionError("Unable to delete volume %s" % self.volname)
g.log.info("Volume deleted successfully %s", self.volname)
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_detach_node_used_to_mount(self):
# pylint: disable=too-many-statements
diff --git a/tests/functional/glusterd/test_enable_brickmux_create_and_stop_three_volumes.py b/tests/functional/glusterd/test_enable_brickmux_create_and_stop_three_volumes.py
index 9ae673100..40b2a19f0 100644
--- a/tests/functional/glusterd/test_enable_brickmux_create_and_stop_three_volumes.py
+++ b/tests/functional/glusterd/test_enable_brickmux_create_and_stop_three_volumes.py
@@ -56,7 +56,7 @@ class TestEnableBrickMuxCreateAndStopThreevolumes(GlusterBaseClass):
raise ExecutionError("Failed to disable cluster.brick-multiplex")
g.log.info("Successfully set cluster.brick-multiplex to disable.")
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_enable_brickmux_create_and_stop_three_volumes(self):
diff --git a/tests/functional/glusterd/test_enable_storage_reserve_volume.py b/tests/functional/glusterd/test_enable_storage_reserve_volume.py
new file mode 100644
index 000000000..b930cad87
--- /dev/null
+++ b/tests/functional/glusterd/test_enable_storage_reserve_volume.py
@@ -0,0 +1,79 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Description:
+ This test case validates the posix storage.reserve option.
+"""
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.volume_ops import set_volume_options
+
+
+@runs_on([['distributed-replicated'], ['glusterfs']])
+class TestPosixStorageReserveOption(GlusterBaseClass):
+ def setUp(self):
+ """
+ setUp method for every test.
+ """
+ self.get_super_method(self, 'setUp')()
+
+ # setup volume
+ ret = self.setup_volume()
+ if not ret:
+ raise ExecutionError("Failed to setup volume")
+
+ def test_enable_storage_reserve_volume(self):
+ """
+ 1) Create a distributed-replicated volume and start it.
+ 2) Enable the storage.reserve option on the volume:
+ gluster volume set <volname> storage.reserve 50
+ 3) Mount the volume on a client
+ 4) Check the df -h output of the mount point and backend bricks.
+ """
+ # Set volume option storage.reserve to 50
+ ret = set_volume_options(
+ self.mnode, self.volname, {"storage.reserve": 50})
+ self.assertTrue(
+ ret, "gluster volume set {} storage.reserve 50 failed on server "
+ "{}".format(self.volname, self.mnode))
+ # Mounting the volume on a client
+ ret = self.mount_volume(self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to mount volume")
+
+ ret, out, _ = g.run(
+ self.clients[0], "df -h | grep -i '{}'".format(
+ self.mounts[0].mountpoint))
+ self.assertFalse(
+ ret, "Failed to run cmd df -h on client {}".format(self.clients[0]))
+
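+ # With storage.reserve at 50, df reports the reserved space as used,
+ # so a freshly mounted volume shows 51% usage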
+ self.assertTrue("51%" in out.split(" "), "51 % is not in list ")
+
+ def tearDown(self):
+ """Tear Down callback"""
+ # Unmount volume and cleanup.
+ ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to unmount and cleanup volume")
+ g.log.info("Successful in unmount and cleanup operations")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
diff --git a/tests/functional/glusterd/test_enabling_glusterd_debug_mode.py b/tests/functional/glusterd/test_enabling_glusterd_debug_mode.py
index 7a355f861..7e431ba69 100644
--- a/tests/functional/glusterd/test_enabling_glusterd_debug_mode.py
+++ b/tests/functional/glusterd/test_enabling_glusterd_debug_mode.py
@@ -18,19 +18,20 @@ from time import sleep
from glusto.core import Glusto as g
from glustolibs.gluster.gluster_base_class import GlusterBaseClass
from glustolibs.gluster.exceptions import ExecutionError
-from glustolibs.gluster.gluster_init import (start_glusterd, stop_glusterd)
-from glustolibs.gluster.volume_ops import get_volume_info
+from glustolibs.gluster.gluster_init import (start_glusterd, stop_glusterd,
+ restart_glusterd)
from glustolibs.gluster.gluster_init import is_glusterd_running
from glustolibs.gluster.glusterfile import (move_file,
find_and_replace_in_file,
check_if_pattern_in_file)
+from glustolibs.misc.misc_libs import daemon_reload
class TestVolumeOptionSetWithMaxcharacters(GlusterBaseClass):
def setUp(self):
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
# check whether peers are in connected state
ret = self.validate_peers_are_connected()
@@ -72,6 +73,12 @@ class TestVolumeOptionSetWithMaxcharacters(GlusterBaseClass):
raise ExecutionError("Reverting glusterd log failed.")
g.log.info("Reverting of glusterd log successful.")
+ # Daemon should be reloaded as unit file is changed
+ ret = daemon_reload(self.mnode)
+ if not ret:
+ raise ExecutionError("Unable to reload the daemon")
+ g.log.info("Daemon reloaded successfully")
+
# Restart glusterd
ret = start_glusterd(self.mnode)
if not ret:
@@ -104,7 +111,7 @@ class TestVolumeOptionSetWithMaxcharacters(GlusterBaseClass):
raise ExecutionError("Peers are not in connected state.")
g.log.info("Peers are in connected state.")
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_enabling_gluster_debug_mode(self):
@@ -139,13 +146,17 @@ class TestVolumeOptionSetWithMaxcharacters(GlusterBaseClass):
self.assertTrue(ret, "Renaming the glusterd log is failed")
g.log.info("Successfully renamed glusterd.log file.")
+ # Reload the daemon as the unit file of the daemon has changed
+ ret = daemon_reload(self.mnode)
+ self.assertTrue(ret, "Failed to reload the daemon")
+
# Start glusterd
ret = start_glusterd(self.mnode)
self.assertTrue(ret, "Failed to start glusterd on %s"
% self.mnode)
g.log.info('Successfully to started glusterd.')
- # Check if glusterd is runnibg or not.
+ # Check if glusterd is running or not.
count = 0
while count < 60:
ret = is_glusterd_running(self.mnode)
@@ -156,14 +167,21 @@ class TestVolumeOptionSetWithMaxcharacters(GlusterBaseClass):
self.assertEqual(ret, 0, "glusterd is not running on %s" % self.mnode)
g.log.info('glusterd is running after changing log_level to debug.')
- # Issue some gluster commands
+ # Instead of executing commands in a loop, restart glusterd on one of
+ # the nodes in the cluster; the resulting handshake messages will be
+ # logged in debug mode
+ ret = restart_glusterd(self.servers[1])
+ self.assertTrue(ret, "Failed to restart glusterd on %s"
+ % self.servers[1])
+
count = 0
- while count < 9:
- ret = get_volume_info(self.mnode)
- self.assertIsNotNone(ret, "Failed to get volume info")
+ while count < 60:
+ ret = is_glusterd_running(self.mnode)
+ if not ret:
+ break
sleep(2)
count += 1
- g.log.info("Successfully got volume info 9 times.")
+ self.assertEqual(ret, 0, "glusterd is not running on %s" % self.mnode)
+ g.log.info('glusterd is up and running after the restart.')
# Check glusterd logs for debug messages
glusterd_log_file = "/var/log/glusterfs/glusterd.log"
diff --git a/tests/functional/glusterd/test_get_state_on_brick_unmount.py b/tests/functional/glusterd/test_get_state_on_brick_unmount.py
new file mode 100644
index 000000000..a2caef214
--- /dev/null
+++ b/tests/functional/glusterd/test_get_state_on_brick_unmount.py
@@ -0,0 +1,126 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from random import choice
+
+from glusto.core import Glusto as g
+
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.volume_libs import (setup_volume, cleanup_volume,)
+from glustolibs.gluster.volume_ops import (get_gluster_state, get_volume_list)
+from glustolibs.gluster.brick_libs import get_all_bricks
+from glustolibs.gluster.glusterfile import check_if_pattern_in_file
+
+
+@runs_on([['distributed', 'replicated',
+ 'distributed-replicated',
+ 'dispersed', 'distributed-dispersed',
+ 'arbiter', 'distributed-arbiter'], []])
+class TestGetStateOnBrickUnmount(GlusterBaseClass):
+ """
+ Tests to verify 'gluster get state' command on unmounting the brick from
+ an online volume
+ """
+
+ @classmethod
+ def setUpClass(cls):
+
+ cls.get_super_method(cls, 'setUpClass')()
+
+ ret = cls.validate_peers_are_connected()
+ if not ret:
+ raise ExecutionError("Failed to validate peers are in connected")
+ g.log.info("Successfully validated peers are in connected state")
+
+ def tearDown(self):
+
+ # Mount the bricks which are unmounted as part of test
+ if getattr(self, 'umount_host', None) and getattr(self, 'umount_brick',
+ None):
+ ret, _, _ = g.run(self.umount_host, 'mount -a')
+ if ret:
+ raise ExecutionError("Not able to mount unmounted brick on "
+ "{}".format(self.umount_host))
+
+ vol_list = get_volume_list(self.mnode)
+ if vol_list:
+ for volume in vol_list:
+ ret = cleanup_volume(self.mnode, volume)
+ if ret:
+ g.log.info("Volume deleted successfully %s", volume)
+ else:
+ raise ExecutionError(
+ "Not able to delete volume {}".format(volume))
+
+ self.get_super_method(self, 'tearDown')()
+
+ def test_get_state_on_brick_unmount(self):
+ """
+ Steps:
+ 1. Form a gluster cluster by peer probing and create a volume
+ 2. Unmount the brick using which the volume is created
+ 3. Run 'gluster get-state' and validate absence of error 'Failed to get
+ daemon state. Check glusterd log file for more details'
+ 4. Create another volume and start it using different bricks which are
+ not used to create above volume
+ 5. Run 'gluster get-state' and validate the absence of above error.
+ """
+ # Setup Volume
+ ret = setup_volume(mnode=self.mnode,
+ all_servers_info=self.all_servers_info,
+ volume_config=self.volume, create_only=True)
+ self.assertTrue(ret, "Failed to setup volume {}".format(self.volname))
+ g.log.info("Successful in setting up volume %s", self.volname)
+
+ # Select one of the bricks in the volume to unmount
+ brick_list = get_all_bricks(self.mnode, self.volname)
+ self.assertIsNotNone(brick_list, ("Not able to get list of bricks "
+ "of volume %s", self.volname))
+
+ select_brick = choice(brick_list)
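+ # A brick is '<host>:<path>'; dropping the trailing brick directory
+ # leaves the host and the mount point backing the brick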
+ self.umount_host, self.umount_brick = (
+ select_brick[0:select_brick.rfind('/')].split(':'))
+
+ # Verify mount entry in /etc/fstab
+ ret = check_if_pattern_in_file(self.umount_host,
+ self.umount_brick, '/etc/fstab')
+ self.assertEqual(ret, 0, "Fail: Brick mount entry is not"
+ " found in /etc/fstab of {}".format(self.umount_host))
+
+ # Unmount the selected brick
+ cmd = 'umount {}'.format(self.umount_brick)
+ ret, _, _ = g.run(self.umount_host, cmd)
+ self.assertEqual(0, ret, "Fail: Not able to unmount {} on "
+ "{}".format(self.umount_brick, self.umount_host))
+
+ # Run 'gluster get-state' and verify absence of any error
+ ret = get_gluster_state(self.mnode)
+ self.assertIsNotNone(ret, "Fail: 'gluster get-state' didn't dump the "
+ "state of glusterd when {} unmounted from "
+ "{}".format(self.umount_brick, self.umount_host))
+
+ # Create another volume
+ self.volume['name'] = 'second_volume'
+ ret = setup_volume(self.mnode, self.all_servers_info, self.volume)
+ self.assertTrue(ret, 'Failed to create and start volume')
+ g.log.info('Second volume created and started successfully')
+
+ # Run 'gluster get-state' and verify absence of any error after
+ # creation of second-volume
+ ret = get_gluster_state(self.mnode)
+ self.assertIsNotNone(ret, "Fail: 'gluster get-state' didn't dump the "
+ "state of glusterd ")
diff --git a/tests/functional/glusterd/test_getstate_shows_correct_brick_status_when_brick_killed.py b/tests/functional/glusterd/test_getstate_shows_correct_brick_status_when_brick_killed.py
new file mode 100644
index 000000000..dff7aa9ef
--- /dev/null
+++ b/tests/functional/glusterd/test_getstate_shows_correct_brick_status_when_brick_killed.py
@@ -0,0 +1,124 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import (GlusterBaseClass, runs_on)
+from glustolibs.gluster.volume_ops import (volume_stop,
+ volume_start,
+ get_gluster_state)
+from glustolibs.gluster.brick_libs import (get_offline_bricks_list,
+ bring_bricks_online,
+ get_online_bricks_list,
+ bring_bricks_offline)
+
+
+@runs_on([['distributed-dispersed', 'replicated', 'arbiter',
+ 'distributed-replicated', 'distributed', 'dispersed',
+ 'distributed-arbiter'],
+ ['glusterfs']])
+class TestGetStateBrickStatus(GlusterBaseClass):
+
+ def setUp(self):
+ self.get_super_method(self, 'setUp')()
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
+ g.log.info("Successful in Setup Volume and Mount Volume")
+
+ def test_validate_get_state(self):
+ """
+ TestCase:
+ 1. Execute "gluster get-state" say on N1(Node1)
+ 2. Start one by one volume and check brick status in get-state output
+ 3. Make sure there are multiple glusterfsd on one node say N1
+ Kill one glusterfsd (kill -9 <pid>) and check
+ 4. Execute "gluster get-state" on N1
+ """
+ # Stop Volume
+ ret, _, _ = volume_stop(self.mnode, self.volname, force=True)
+ self.assertEqual(ret, 0, ("Failed to stop the volume "
+ "%s", self.volname))
+
+ # Execute 'gluster get-state' on mnode
+ get_state_data = get_gluster_state(self.mnode)
+ self.assertIsNotNone(get_state_data, "Getting gluster state failed.")
+
+ # Getting Brick 1 Status - It should be in Stopped State
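+ # get-state indexes bricks per volume: 'volume1.brick1.status' is
+ # the status of the first brick of the first volume in the dump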
+ brick_status = (get_state_data['Volumes']
+ ['volume1.brick1.status'].strip())
+ self.assertEqual(brick_status, "Stopped",
+ "The brick is not in Stopped State")
+
+ # Start the volume and check the status of brick again
+ ret, _, _ = volume_start(self.mnode, self.volname, force=True)
+ self.assertFalse(ret, 'Failed to start volume %s with "force" option'
+ % self.volname)
+
+ # Execute 'gluster get-state' on mnode
+ get_state_data = get_gluster_state(self.mnode)
+ self.assertIsNotNone(get_state_data, "Getting gluster state failed.")
+ # Getting Brick 1 Status - It should be in Started State
+ brick_status = (get_state_data['Volumes']
+ ['volume1.brick1.status'].strip())
+ self.assertEqual(brick_status, "Started",
+ "The brick is not in Started State")
+
+ # Bringing the brick offline
+ vol_bricks = get_online_bricks_list(self.mnode, self.volname)
+ ret = bring_bricks_offline(self.volname, vol_bricks[0])
+ self.assertTrue(ret, 'Failed to bring brick %s offline' %
+ vol_bricks[0])
+
+ # Execute 'gluster get-state' on mnode
+ get_state_data = get_gluster_state(self.mnode)
+ self.assertIsNotNone(get_state_data, "Getting gluster state failed.")
+ # Getting Brick 1 Status - It should be in Stopped State
+ brick_status = (get_state_data['Volumes']
+ ['volume1.brick1.status'].strip())
+ self.assertEqual(brick_status, "Stopped",
+ "The brick is not in Stopped State")
+ g.log.info("Brick 1 is in Stopped state as expected.")
+
+ # Checking the server 2 for the status of Brick.
+ # It should be 'Started' state
+ node2 = self.servers[1]
+ get_state_data = get_gluster_state(node2)
+ self.assertIsNotNone(get_state_data, "Getting gluster state failed.")
+ # Getting Brick 2 Status - It should be in Started State
+ brick_status = (get_state_data['Volumes']
+ ['volume1.brick2.status'].strip())
+ self.assertEqual(brick_status, "Started",
+ "The brick is not in Started State")
+ g.log.info("Brick2 is in started state.")
+
+ # Bringing back the offline brick
+ offline_brick = get_offline_bricks_list(self.mnode, self.volname)
+ ret = bring_bricks_online(self.mnode, self.volname,
+ offline_brick)
+ self.assertTrue(ret, 'Failed to bring brick %s online' %
+ offline_brick)
+
+ def tearDown(self):
+ # stopping the volume
+ ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to Unmount Volume & Cleanup Volume")
+ g.log.info("Successful in Unmount Volume and Cleanup Volume")
+
+ # calling GlusterBaseClass tearDownClass
+ self.get_super_method(self, 'tearDown')()
diff --git a/tests/functional/glusterd/test_gluster_detect_drop_of_outbound_traffic.py b/tests/functional/glusterd/test_gluster_detect_drop_of_outbound_traffic.py
new file mode 100644
index 000000000..1a45d5c82
--- /dev/null
+++ b/tests/functional/glusterd/test_gluster_detect_drop_of_outbound_traffic.py
@@ -0,0 +1,115 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+""" Description:
+ Gluster should detect drop of outbound traffic as network failure
+"""
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.peer_ops import nodes_from_pool_list, get_peer_status
+from glustolibs.gluster.volume_ops import volume_status
+
+
+@runs_on([['distributed', 'replicated', 'distributed-replicated',
+ 'dispersed', 'distributed-dispersed', 'arbiter',
+ 'distributed-arbiter'], ['glusterfs']])
+class TestGlusterDetectDropOfOutboundTrafficAsNetworkFailure(GlusterBaseClass):
+ def setUp(self):
+ # calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+
+ ret = self.setup_volume()
+ if not ret:
+ raise ExecutionError("Volume creation failed: %s"
+ % self.volname)
+ g.log.info("Volume created successfully : %s", self.volname)
+
+ def tearDown(self):
+ # Remove the iptable rule, if it was set by the test
+ if getattr(self, 'iptablerule_set', False):
+ cmd = "iptables -D OUTPUT -p tcp -m tcp --dport 24007 -j DROP"
+ ret, _, _ = g.run(self.servers[1], cmd)
+ if ret:
+ raise ExecutionError("Failed to remove the iptable rule"
+ " for glusterd")
+
+ # Cleaning up the volume
+ ret = self.cleanup_volume()
+ if not ret:
+ raise ExecutionError("Failed to cleanup the volume %s"
+ % self.volname)
+ g.log.info("Volume deleted successfully: %s", self.volname)
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def test_gluster_detect_drop_of_out_traffic_as_network_failure(self):
+ """
+ Test Case:
+ 1) Create a volume and start it.
+ 2) Add an iptable rule to drop outbound glusterd traffic
+ 3) Check if the rule is added in iptables list
+ 4) Execute few Gluster CLI commands like volume status, peer status
+ 5) Gluster CLI commands should fail with suitable error message
+ """
+ # Set iptablerule_set as false initially
+ self.iptablerule_set = False
+
+ # Set iptable rule on one node to drop outbound glusterd traffic
+ cmd = "iptables -I OUTPUT -p tcp --dport 24007 -j DROP"
+ ret, _, _ = g.run(self.servers[1], cmd)
+ self.assertEqual(ret, 0, "Failed to set iptable rule on the node: %s"
+ % self.servers[1])
+ g.log.info("Successfully added the rule to iptable")
+
+ # Update iptablerule_set to true
+ self.iptablerule_set = True
+
+ # Confirm if the iptable rule was added successfully
+ iptable_rule = "'OUTPUT -p tcp -m tcp --dport 24007 -j DROP'"
+ cmd = "iptables -S OUTPUT | grep %s" % iptable_rule
+ ret, _, _ = g.run(self.servers[1], cmd)
+ self.assertEqual(ret, 0, "Failed to get the rule from iptable")
+
+ # Fetch number of nodes in the pool, except localhost
+ pool_list = nodes_from_pool_list(self.mnode)
+ peers_count = len(pool_list) - 1
+
+ # Gluster CLI commands should fail
+ # Check volume status command
+ ret, _, err = volume_status(self.servers[1])
+ self.assertEqual(ret, 2, "Unexpected: gluster volume status command"
+ " did not return any error")
+
+ status_err_count = err.count("Staging failed on")
+ self.assertEqual(status_err_count, peers_count, "Unexpected: No. of"
+ " nodes on which vol status cmd failed is not equal"
+ " to peers_count value")
+ g.log.info("Volume status command failed with expected error message")
+
+ # Check peer status command and all peers are in 'Disconnected' state
+ peer_list = get_peer_status(self.servers[1])
+
+ for peer in peer_list:
+ self.assertEqual(int(peer["connected"]), 0, "Unexpected: All"
+ " the peers are not in 'Disconnected' state")
+ self.assertEqual(peer["stateStr"], "Peer in Cluster", "Unexpected:"
+ " All the peers not in 'Peer in Cluster' state")
+
+ g.log.info("Peer status command listed all the peers in the"
+ "expected state")
diff --git a/tests/functional/glusterd/test_gluster_does_not_do_posix_lock_when_multiple_client.py b/tests/functional/glusterd/test_gluster_does_not_do_posix_lock_when_multiple_client.py
new file mode 100644
index 000000000..e6cf31e9e
--- /dev/null
+++ b/tests/functional/glusterd/test_gluster_does_not_do_posix_lock_when_multiple_client.py
@@ -0,0 +1,91 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+
+
+@runs_on([['distributed', 'replicated', 'distributed-replicated',
+ 'dispersed', 'distributed-dispersed',
+ 'arbiter', 'distributed-arbiter'], ['glusterfs']])
+class TestFlock(GlusterBaseClass):
+ def setUp(self):
+ """
+ setUp method for every test
+ """
+ # Calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+
+ ret = self.setup_volume_and_mount_volume(self.mounts)
+ if not ret:
+ raise ExecutionError("Volume creation failed: %s" % self.volname)
+
+ def tearDown(self):
+ """
+ TearDown for every test
+ """
+ # Stopping the volume and Cleaning up the volume
+ ret = self.unmount_volume_and_cleanup_volume(self.mounts)
+ if not ret:
+ raise ExecutionError(
+ "Failed Cleanup the Volume %s" % self.volname)
+
+ self.get_super_method(self, 'tearDown')()
+
+ def test_gluster_does_not_do_posix_lock_when_multiple_client(self):
+ """
+ Steps:
+ 1. Create all types of volumes.
+ 2. Mount the brick on two client mounts
+ 3. Prepare same script to do flock on the two nodes
+ while running this script it should not hang
+ 4. Wait till 300 iteration on both the node
+ """
+
+ # Shell Script to be run on mount point
+ script = """
+ #!/bin/bash
+ flock_func(){
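+ # lock a file on the mounted volume; the script is copied to and run
+ # from the mount point, so $(dirname $0) resolves to it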
+ file=$(dirname $0)/test.log
+ touch $file
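+ # the fd number given to flock must match the fd opened by the
+ # redirection on the closing ')'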
+ (
+ flock -xo 200
+ echo "client1 do something" > $file
+ sleep 1
+ ) 200>$file
+ }
+ i=1
+ while [ "1" = "1" ]
+ do
+ flock_func
+ ((i=i+1))
+ echo $i
+ if [[ $i == 300 ]]; then
+ break
+ fi
+ done
+ """
+ mount_point = self.mounts[0].mountpoint
+ cmd = "echo '{}' >'{}'/test.sh; sh '{}'/test.sh ".format(
+ script, mount_point, mount_point)
+ ret = g.run_parallel(self.clients[:2], cmd)
+
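+ # g.run_parallel() maps each client to its (retcode, stdout, stderr)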
+ # Check if 300 is present in the output
+ for client_ip, _ in ret.items():
+ self.assertTrue("300" in ret[client_ip][1].split("\n"),
+ "300 iteration is not completed")
+ self.assertFalse(ret[client_ip][0], "Failed to run the cmd ")
diff --git a/tests/functional/glusterd/test_gluster_volume_status_xml_dump.py b/tests/functional/glusterd/test_gluster_volume_status_xml_dump.py
new file mode 100644
index 000000000..eacc0b3c5
--- /dev/null
+++ b/tests/functional/glusterd/test_gluster_volume_status_xml_dump.py
@@ -0,0 +1,106 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Description:
+ Test that the 'gluster volume status --xml' dump is consistent
+"""
+from time import sleep
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.lib_utils import form_bricks_list
+from glustolibs.gluster.volume_libs import cleanup_volume
+from glustolibs.gluster.volume_ops import (
+ volume_stop, get_volume_status,
+ volume_create, volume_start
+)
+
+
+@runs_on([['distributed-arbiter'],
+ ['glusterfs']])
+class GetVolumeStatusXmlDump(GlusterBaseClass):
+
+ def setUp(self):
+ """Setup Volume"""
+ # Calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+
+ # Fetching all the parameters for volume_create
+ list_of_three_servers = []
+ server_info_for_three_nodes = {}
+
+ for server in self.servers[0:3]:
+ list_of_three_servers.append(server)
+ server_info_for_three_nodes[server] = self.all_servers_info[
+ server]
+
+ bricks_list = form_bricks_list(
+ self.mnode, self.volname, 3, list_of_three_servers,
+ server_info_for_three_nodes)
+ # Creating 2nd volume
+ self.volname_2 = "test_volume"
+ ret, _, _ = volume_create(self.mnode, self.volname_2,
+ bricks_list)
+ self.assertFalse(ret, "Volume creation failed")
+ g.log.info("Volume %s created successfully", self.volname_2)
+ ret, _, _ = volume_start(self.mnode, self.volname_2)
+ if ret:
+ raise ExecutionError(
+ "Failed to start volume {}".format(self.volname_2))
+ # Setup and mount the volume
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to setup volume and mount it")
+
+ def test_gluster_volume_status_xml_dump(self):
+ """
+ Steps:
+ 1. Stop one of the volumes,
+ i.e. gluster volume stop <vol-name>
+ 2. Get the status of the volumes with --xml dump
+ XML dump should be consistent
+ """
+ ret, _, _ = volume_stop(self.mnode, volname=self.volname_2,
+ force=True)
+ self.assertFalse(ret,
+ "Failed to stop volume '{}'".format(
+ self.volname_2))
+ out = get_volume_status(self.mnode)
+ self.assertIsNotNone(
+ out, "Failed to get volume status on {}".format(self.mnode))
+ for _ in range(4):
+ sleep(2)
+ out1 = get_volume_status(self.mnode)
+ self.assertIsNotNone(
+ out1, "Failed to get volume status on {}".format(
+ self.mnode))
+ self.assertEqual(out1, out, "Volume status XML dump is not consistent")
+
+ def tearDown(self):
+ """tear Down Callback"""
+ ret = cleanup_volume(self.mnode, self.volname_2)
+ if not ret:
+ raise ExecutionError(
+ "Failed to remove volume '{}'".format(self.volname_2))
+ # Unmount volume and cleanup.
+ ret = self.unmount_volume_and_cleanup_volume(self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to Unmount and Cleanup volume")
+ g.log.info("Successful in unmount and cleanup operations")
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
diff --git a/tests/functional/glusterd/test_glusterd_default_volume_behavior_quorum_options.py b/tests/functional/glusterd/test_glusterd_default_volume_behavior_quorum_options.py
new file mode 100644
index 000000000..b2652a4ea
--- /dev/null
+++ b/tests/functional/glusterd/test_glusterd_default_volume_behavior_quorum_options.py
@@ -0,0 +1,144 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Description:
+ Test Default volume behavior and quorum options
+"""
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.volume_ops import (
+ get_volume_options,
+ volume_reset)
+from glustolibs.gluster.gluster_init import (
+ stop_glusterd,
+ start_glusterd,
+ is_glusterd_running,
+ wait_for_glusterd_to_start)
+from glustolibs.gluster.brick_libs import get_all_bricks
+from glustolibs.gluster.brickmux_ops import get_brick_processes_count
+from glustolibs.gluster.peer_ops import wait_for_peers_to_connect
+
+
+@runs_on([['replicated', 'arbiter', 'dispersed', 'distributed',
+ 'distributed-replicated', 'distributed-arbiter'],
+ ['glusterfs']])
+class TestGlusterDDefaultVolumeBehaviorQuorumOptions(GlusterBaseClass):
+ """ Testing default volume behavior and Quorum options """
+
+ def setUp(self):
+ """Setup Volume"""
+ # Calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+
+ # Setup and mount the volume
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to setup volume and mount it")
+
+ def _validate_vol_options(self, option_name, option_value, for_all=False):
+ """ Function to validate default vol options """
+ if not for_all:
+ ret = get_volume_options(self.mnode, self.volname, option_name)
+ else:
+ ret = get_volume_options(self.mnode, 'all', option_name)
+ self.assertIsNotNone(ret, "The %s option is not present" % option_name)
+ value = (ret[option_name]).split()
+ self.assertEqual(value[0], option_value,
+ ("Volume option for %s is not equal to %s"
+ % (option_name, option_value)))
+ g.log.info("Volume option %s is equal to the expected value %s",
+ option_name, option_value)
+
+ def _get_total_brick_processes_count(self):
+ """
+ Function to find the total number of brick processes in the cluster
+ """
+ count = 0
+ self.brick_list = get_all_bricks(self.mnode, self.volname)
+ for brick in self.brick_list:
+ server = brick.split(":")[0]
+ count += get_brick_processes_count(server)
+ return count
+
+ def test_glusterd_default_vol_behavior_and_quorum_options(self):
+ """
+ Test default volume behavior and quorum options
+ 1. Create a volume and start it.
+ 2. Check that no quorum options are found in vol info.
+ 3. Kill two glusterd processes.
+ 4. There shouldn't be any effect to the running glusterfsd
+ processes.
+ """
+ # Check the default quorum options are correct.
+ self._validate_vol_options('cluster.server-quorum-type', 'off')
+ self._validate_vol_options('cluster.server-quorum-ratio',
+ '51', True)
+
+ # Get the count of number of glusterfsd processes running.
+ count_before_glusterd_kill = self._get_total_brick_processes_count()
+
+ # Kill two glusterd processes.
+ server_list = [self.servers[1], self.servers[2]]
+ ret = stop_glusterd(server_list)
+ self.assertTrue(ret, "Failed to stop glusterd on the specified nodes.")
+ ret = is_glusterd_running(server_list)
+ self.assertNotEqual(ret, 0, ("Glusterd is not stopped on the servers"
+ " where it was desired to be stopped."))
+ g.log.info("Glusterd processes stopped in the desired servers.")
+
+ # Get the count of number of glusterfsd processes running.
+ count_after_glusterd_kill = self._get_total_brick_processes_count()
+
+ # The count of glusterfsd processes should match
+ self.assertEqual(count_before_glusterd_kill, count_after_glusterd_kill,
+ ("Glusterfsd processes are affected."))
+ g.log.info("Glusterd processes are not affected.")
+
+ # Start glusterd on all servers.
+ ret = start_glusterd(self.servers)
+ self.assertTrue(ret, "Failed to Start glusterd on the specified"
+ " nodes")
+ g.log.info("Started glusterd on all nodes.")
+
+ # Wait for glusterd to restart.
+ ret = wait_for_glusterd_to_start(self.servers)
+ self.assertTrue(ret, "Glusterd not up on all nodes.")
+ g.log.info("Glusterd is up and running on all nodes.")
+
+ def tearDown(self):
+ """tear Down Callback"""
+ # Wait for peers to connect.
+ ret = wait_for_peers_to_connect(self.mnode, self.servers, 50)
+ if not ret:
+ raise ExecutionError("Peers are not in connected state.")
+
+ # Unmount volume and cleanup.
+ ret = self.cleanup_volume()
+ if not ret:
+ raise ExecutionError("Failed to Unmount and Cleanup volume")
+ g.log.info("Successful in unmount and cleanup operations")
+
+ # Reset the cluster options.
+ ret = volume_reset(self.mnode, "all")
+ if not ret:
+ raise ExecutionError("Failed to Reset the cluster options.")
+ g.log.info("Successfully reset cluster options.")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
diff --git a/tests/functional/glusterd/test_glusterd_gluster_process_stop_start_cycle.py b/tests/functional/glusterd/test_glusterd_gluster_process_stop_start_cycle.py
new file mode 100644
index 000000000..3eb3518d2
--- /dev/null
+++ b/tests/functional/glusterd/test_glusterd_gluster_process_stop_start_cycle.py
@@ -0,0 +1,123 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Description:
+ Checking gluster processes stop and start cycle.
+"""
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.volume_libs import (
+ cleanup_volume,
+ wait_for_volume_process_to_be_online,
+ setup_volume)
+from glustolibs.gluster.gluster_init import (
+ start_glusterd,
+ wait_for_glusterd_to_start)
+from glustolibs.gluster.peer_ops import wait_for_peers_to_connect
+
+
+@runs_on([['distributed', 'replicated', 'arbiter', 'dispersed',
+ 'distributed-replicated', 'distributed-arbiter',
+ 'distributed-dispersed'], ['glusterfs']])
+class TestGlusterdStartStopCycle(GlusterBaseClass):
+ """ Testing Glusterd stop and start cycle """
+
+ def _wait_for_gluster_process_online_state(self):
+ """
+ Function which waits for the glusterfs processes to come up
+ """
+ # Wait for glusterd to be online and validate it's running.
+ self.assertTrue(wait_for_glusterd_to_start(self.servers),
+ "glusterd not up on the desired nodes.")
+ g.log.info("Glusterd is up and running on desired nodes.")
+
+ # Wait for peers to connect
+ ret = wait_for_peers_to_connect(self.mnode, self.servers, 50)
+ self.assertTrue(ret, "Peers not in connected state.")
+ g.log.info("Peers in connected state.")
+
+ # Wait for all volume processes to be online
+ ret = wait_for_volume_process_to_be_online(self.mnode,
+ self.volname,
+ timeout=600)
+ self.assertTrue(ret, ("All volume processes not up."))
+ g.log.info("All volume processes are up.")
+
+ def test_glusterd_start_stop_cycle(self):
+ """
+ Test Glusterd stop-start cycle of gluster processes.
+ 1. Create a gluster volume.
+ 2. Kill all gluster related processes.
+ 3. Start glusterd service.
+ 4. Verify that all gluster processes are up.
+ 5. Repeat the above steps 5 times.
+ """
+ # Create and start a volume
+ ret = setup_volume(self.mnode, self.all_servers_info, self.volume)
+ self.assertTrue(ret, "Failed to create and start volume")
+
+ for _ in range(5):
+ killed_gluster_process_count = []
+ # Kill gluster processes in all servers
+ for server in self.servers:
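+ # 'pkill -e' echoes every process it kills and '-c' prints a final
+ # count; 'tail -1' keeps only the count for the later comparison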
+ cmd = ('pkill --signal 9 -c -e "(glusterd|glusterfsd|glusterfs'
+ ')"|tail -1')
+ ret, out, err = g.run(server, cmd)
+ self.assertEqual(ret, 0, err)
+ killed_gluster_process_count.append(int(out))
+
+ # Start glusterd on all servers.
+ ret = start_glusterd(self.servers)
+ self.assertTrue(ret, ("Failed to restart glusterd on desired"
+ " nodes."))
+ g.log.info("Glusterd started on desired nodes.")
+
+ # Wait for gluster processes to come up.
+ self._wait_for_gluster_process_online_state()
+
+ spawned_gluster_process_count = []
+ # Get number of gluster processes spawned in all server
+ for server in self.servers:
+ cmd = ('pgrep -c "(glusterd|glusterfsd|glusterfs)"')
+ ret, out, err = g.run(server, cmd)
+ self.assertEqual(ret, 0, err)
+ spawned_gluster_process_count.append(int(out))
+
+ # Compare process count in each server.
+ for index, server in enumerate(self.servers):
+ self.assertEqual(killed_gluster_process_count[index],
+ spawned_gluster_process_count[index],
+ ("All processes not up and running on %s",
+ server))
+
+ def tearDown(self):
+ """ tear Down Callback """
+ # Wait for peers to connect
+ ret = wait_for_peers_to_connect(self.mnode, self.servers, 50)
+ if not ret:
+ raise ExecutionError("Peers are not in connected state.")
+
+ # Cleanup the volume
+ ret = cleanup_volume(self.mnode, self.volname)
+ if not ret:
+ raise ExecutionError("Failed to cleanup volume")
+ g.log.info("Successfully cleaned up the volume")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
diff --git a/tests/functional/glusterd/test_glusterd_logs_when_peer_detach.py b/tests/functional/glusterd/test_glusterd_logs_when_peer_detach.py
index ed8ba0385..2ffa50392 100644
--- a/tests/functional/glusterd/test_glusterd_logs_when_peer_detach.py
+++ b/tests/functional/glusterd/test_glusterd_logs_when_peer_detach.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2018-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -24,7 +24,8 @@ from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass
from glustolibs.gluster.peer_ops import (peer_detach_servers,
- peer_probe_servers)
+ peer_probe_servers,
+ is_peer_connected)
class GlusterdLogsWhilePeerDetach(GlusterBaseClass):
@@ -33,14 +34,17 @@ class GlusterdLogsWhilePeerDetach(GlusterBaseClass):
"""
tearDown for every test
"""
- # Peer probe detached server
- ret = peer_probe_servers(self.mnode, self.random_server)
+ # checking for peer status from every node
+ ret = is_peer_connected(self.mnode, self.servers)
if not ret:
- raise ExecutionError(ret, "Failed to probe detached server")
- g.log.info("peer probe is successful for %s", self.random_server)
+ ret = peer_probe_servers(self.mnode, self.random_server)
+ if not ret:
+ raise ExecutionError("Failed to peer probe failed in "
+ "servers %s" % self.random_server)
+ g.log.info("All peers are in connected state")
# Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_logs_while_peer_detach(self):
'''
diff --git a/tests/functional/glusterd/test_glusterd_memory_consumption_increase.py b/tests/functional/glusterd/test_glusterd_memory_consumption_increase.py
new file mode 100644
index 000000000..92c48da6f
--- /dev/null
+++ b/tests/functional/glusterd/test_glusterd_memory_consumption_increase.py
@@ -0,0 +1,207 @@
+# Copyright (C) 2021 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+""" Description:
+ Increase in glusterd memory consumption on repetitive operations
+ for 100 volumes
+"""
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass
+from glustolibs.gluster.volume_ops import (volume_stop, volume_delete,
+ get_volume_list,
+ volume_start)
+from glustolibs.gluster.gluster_init import (restart_glusterd,
+ wait_for_glusterd_to_start)
+from glustolibs.gluster.volume_libs import (bulk_volume_creation,
+ cleanup_volume)
+from glustolibs.gluster.volume_ops import set_volume_options
+
+
+class TestGlusterMemoryConsumptionIncrease(GlusterBaseClass):
+ def tearDown(self):
+ # Clean up all volumes
+ if getattr(self, 'volume_present', False):
+ vol_list = get_volume_list(self.mnode)
+ if vol_list is None:
+ raise ExecutionError("Failed to get the volume list")
+
+ for volume in vol_list:
+ ret = cleanup_volume(self.mnode, volume)
+ if not ret:
+ raise ExecutionError("Unable to delete volume %s" % volume)
+ g.log.info("Volume deleted successfully : %s", volume)
+
+ # Disable multiplex
+ ret = set_volume_options(self.mnode, 'all',
+ {'cluster.brick-multiplex': 'disable'})
+ self.assertTrue(ret, "Failed to enable brick-multiplex"
+ " for the cluster")
+
+ # Calling baseclass tearDown method
+ self.get_super_method(self, 'tearDown')()
+
+ def _volume_operations_in_loop(self):
+ """ Create, start, stop and delete 100 volumes in a loop """
+ # Create and start 100 volumes in a loop
+ self.volume_config = {
+ 'name': 'volume-',
+ 'servers': self.servers,
+ 'voltype': {'type': 'distributed-replicated',
+ 'dist_count': 2,
+ 'replica_count': 3},
+ }
+
+ ret = bulk_volume_creation(self.mnode, 100, self.all_servers_info,
+ self.volume_config, "", False, True)
+ self.assertTrue(ret, "Failed to create volumes")
+
+ self.volume_present = True
+
+ g.log.info("Successfully created all the volumes")
+
+ # Start 100 volumes in loop
+ for i in range(100):
+ self.volname = "volume-%d" % i
+ ret, _, _ = volume_start(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "Failed to start volume: %s"
+ % self.volname)
+
+ g.log.info("Successfully started all the volumes")
+
+ # Stop 100 volumes in loop
+ for i in range(100):
+ self.volname = "volume-%d" % i
+ ret, _, _ = volume_stop(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "Failed to stop volume: %s"
+ % self.volname)
+
+ g.log.info("Successfully stopped all the volumes")
+
+ # Delete 100 volumes in loop
+ for i in range(100):
+ self.volname = "volume-%d" % i
+ ret = volume_delete(self.mnode, self.volname)
+ self.assertTrue(ret, "Failed to delete volume: %s"
+ % self.volname)
+
+ self.volume_present = False
+
+ g.log.info("Successfully deleted all the volumes")
+
+ def _memory_consumption_for_all_nodes(self, pid_list):
+ """Fetch the memory consumption by glusterd process for
+ all the nodes
+ """
+ memory_consumed_list = []
+ for i, server in enumerate(self.servers):
+ # Get the memory consumption of glusterd in each node
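+ # In 'top -b -n 1 -p <pid>' output, line 8 is the process row and
+ # field 6 is RES, the resident memory in KiB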
+ cmd = "top -b -n 1 -p %d | awk 'FNR==8 {print $6}'" % pid_list[i]
+ ret, mem, _ = g.run(server, cmd)
+ self.assertEqual(ret, 0, "Failed to get the memory usage of"
+ " glusterd process")
+ mem = int(mem)//1024
+ memory_consumed_list.append(mem)
+
+ return memory_consumed_list
+
+ def test_glusterd_memory_consumption_increase(self):
+ """
+ Test Case:
+ 1) Enable brick-multiplex and set max-bricks-per-process to 3 in
+ the cluster
+ 2) Get the glusterd memory consumption
+ 3) Perform create,start,stop,delete operation for 100 volumes
+ 4) Check glusterd memory consumption; it should not increase by
+ more than 50MB
+ 5) Repeat steps 3-4 two more times
+ 6) Check glusterd memory consumption; it should not increase by
+ more than 10MB
+ """
+ # pylint: disable=too-many-locals
+ # Restarting glusterd to refresh its memory consumption
+ ret = restart_glusterd(self.servers)
+ self.assertTrue(ret, "Restarting glusterd failed")
+
+ # check if glusterd is running post reboot
+ ret = wait_for_glusterd_to_start(self.servers)
+ self.assertTrue(ret, "Glusterd service is not running post reboot")
+
+ # Enable brick-multiplex, set max-bricks-per-process to 3 in cluster
+ for key, value in (('cluster.brick-multiplex', 'enable'),
+ ('cluster.max-bricks-per-process', '3')):
+ ret = set_volume_options(self.mnode, 'all', {key: value})
+ self.assertTrue(ret, "Failed to set {} to {} "
+ " for the cluster".format(key, value))
+
+ # Get the pidof of glusterd process
+ pid_list = []
+ for server in self.servers:
+ # Get the pidof of glusterd process
+ cmd = "pidof glusterd"
+ ret, pid, _ = g.run(server, cmd)
+ self.assertEqual(ret, 0, "Failed to get the pid of glusterd")
+ pid = int(pid)
+ pid_list.append(pid)
+
+ # Fetch the list of memory consumed in all the nodes
+ mem_consumed_list = self._memory_consumption_for_all_nodes(pid_list)
+
+ # Perform volume operations for 100 volumes for first time
+ self._volume_operations_in_loop()
+
+ # Fetch the list of memory consumed in all the nodes after 1 iteration
+ mem_consumed_list_1 = self._memory_consumption_for_all_nodes(pid_list)
+
+ for i, mem in enumerate(mem_consumed_list_1):
+ condition_met = False
+ if mem - mem_consumed_list[i] <= 50:
+ condition_met = True
+
+ self.assertTrue(condition_met, "Unexpected: Memory consumption"
+ " glusterd increased more than the expected"
+ " of value")
+
+ # Perform volume operations for 100 volumes for second time
+ self._volume_operations_in_loop()
+
+ # Fetch the list of memory consumed in all the nodes after 2 iterations
+ mem_consumed_list_2 = self._memory_consumption_for_all_nodes(pid_list)
+
+ for i, mem in enumerate(mem_consumed_list_2):
+ condition_met = False
+ if mem - mem_consumed_list_1[i] <= 10:
+ condition_met = True
+
+ self.assertTrue(condition_met, "Unexpected: Memory consumption"
+ " glusterd increased more than the expected"
+ " of value")
+
+ # Perform volume operations for 100 volumes for third time
+ self._volume_operations_in_loop()
+
+ # Fetch the list of memory consumed in all the nodes after 3 iterations
+ mem_consumed_list_3 = self._memory_consumption_for_all_nodes(pid_list)
+
+ for i, mem in enumerate(mem_consumed_list_3):
+ condition_met = False
+ if mem - mem_consumed_list_2[i] <= 10:
+ condition_met = True
+
+ self.assertTrue(condition_met, "Unexpected: Memory consumption"
+ " glusterd increased more than the expected"
+ " of value")
diff --git a/tests/functional/glusterd/test_glusterd_quorum.py b/tests/functional/glusterd/test_glusterd_quorum.py
new file mode 100644
index 000000000..9744370e8
--- /dev/null
+++ b/tests/functional/glusterd/test_glusterd_quorum.py
@@ -0,0 +1,300 @@
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from time import sleep
+import pytest
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.brick_libs import wait_for_bricks_to_be_online
+from glustolibs.gluster.volume_libs import (setup_volume, volume_exists,
+ cleanup_volume)
+from glustolibs.gluster.gluster_init import (stop_glusterd, start_glusterd,
+ is_glusterd_running)
+from glustolibs.gluster.volume_ops import (set_volume_options, volume_start,
+ volume_stop, volume_delete,
+ get_volume_list, volume_reset)
+from glustolibs.gluster.peer_ops import (is_peer_connected, peer_probe_servers,
+ peer_detach_servers, peer_probe)
+
+
+@runs_on([['distributed-replicated', 'replicated'], ['glusterfs']])
+class TestServerQuorum(GlusterBaseClass):
+
+ @classmethod
+ def setUpClass(cls):
+ # Calling GlusterBaseClass setUpClass
+ cls.get_super_method(cls, 'setUpClass')()
+ ret = volume_exists(cls.mnode, cls.volname)
+ if ret:
+ ret = cleanup_volume(cls.mnode, cls.volname)
+ if not ret:
+ raise ExecutionError("Unable to delete volume")
+ g.log.info("Successfully deleted volume % s", cls.volname)
+
+ # Check if peers are in connected state and detach all the nodes
+ for server in cls.servers:
+ ret = is_peer_connected(server, cls.servers)
+ if ret:
+ ret = peer_detach_servers(server, cls.servers)
+ if not ret:
+ raise ExecutionError(
+ "Detach failed from all the servers from the node.")
+ g.log.info("Peer detach SUCCESSFUL.")
+
+ # Before starting the testcase, proceed only if there are at least 4 nodes
+ if len(cls.servers) < 4:
+ raise ExecutionError("Minimum of four nodes required for this"
+ " testcase to execute")
+
+ def tearDown(self):
+
+ vol_list = get_volume_list(self.mnode)
+ if vol_list is None:
+ raise ExecutionError("Failed to get volume list")
+
+ for volume in vol_list:
+ ret = cleanup_volume(self.mnode, volume)
+ if not ret:
+ raise ExecutionError("Failed Cleanup the volume")
+ g.log.info("Volume deleted successfully %s", volume)
+
+ # Setting quorum ratio to 51%
+ ret = set_volume_options(self.mnode, 'all',
+ {'cluster.server-quorum-ratio': '51%'})
+ if not ret:
+ raise ExecutionError("Failed to set server quorum ratio on %s"
+ % self.volname)
+
+ # Peer probe servers since we are doing peer detach in setUpClass
+ for server in self.servers:
+ ret = is_peer_connected(server, self.servers)
+ if not ret:
+ ret = peer_probe_servers(server, self.servers)
+ if not ret:
+ raise ExecutionError(
+ "Peer probe failed to one of the node")
+ g.log.info("Peer probe successful")
+
+ self.get_super_method(self, 'tearDown')()
+
+ @pytest.mark.test_glusterd_quorum_validation
+ def test_glusterd_quorum_validation(self):
+ """
+ -> Creating two volumes and starting them, stop the second volume
+ -> set the server quorum and set the ratio to 90
+ -> Stop the glusterd in one of the node, so the quorum won't meet
+ -> Peer probing a new node should fail
+ -> Volume stop will fail
+ -> volume delete will fail
+ -> volume reset will fail
+ -> Start the glusterd on the node where it is stopped
+ -> Volume stop, start, delete will succeed once quorum is met
+ """
+ # pylint: disable=too-many-statements, too-many-branches
+
+ # Collect the server info of the first 3 servers
+ servers_info_from_three_nodes = {}
+ for server in self.servers[0:3]:
+ servers_info_from_three_nodes[
+ server] = self.all_servers_info[server]
+
+ # Peer probe the first 3 servers
+ ret, _, _ = peer_probe(self.mnode, server)
+ self.assertEqual(ret, 0,
+ ("Peer probe failed to one of the servers"))
+ g.log.info("Peer probe to first 3 nodes succeeded")
+
+ self.volume['servers'] = self.servers[0:3]
+ # Create a volume using the first 3 nodes
+ ret = setup_volume(self.mnode, servers_info_from_three_nodes,
+ self.volume, force=True)
+ self.assertTrue(ret, ("Failed to create and start volume"))
+ g.log.info("Volume created and started successfully")
+
+ # Creating another volume and stopping it
+ second_volume = "second_volume"
+ self.volume['name'] = second_volume
+ ret = setup_volume(self.mnode, servers_info_from_three_nodes,
+ self.volume, force=True)
+ self.assertTrue(ret, ("Failed to create and start volume"))
+ g.log.info("Volume created and started succssfully")
+
+ # stopping the second volume
+ g.log.info("Stopping the second volume %s", second_volume)
+ ret, _, _ = volume_stop(self.mnode, second_volume)
+ self.assertEqual(ret, 0, ("Failed to stop the volume"))
+ g.log.info("Successfully stopped second volume %s", second_volume)
+
+ # Setting the server-quorum-type as server
+ self.options = {"cluster.server-quorum-type": "server"}
+ vol_list = get_volume_list(self.mnode)
+ self.assertIsNotNone(vol_list, "Failed to get the volume list")
+ g.log.info("Fetched the volume list")
+ for volume in vol_list:
+ g.log.info("Setting the server-quorum-type as server"
+ " on volume %s", volume)
+ ret = set_volume_options(self.mnode, volume, self.options)
+ self.assertTrue(ret, ("Failed to set the quorum type as a server"
+ " on volume %s", volume))
+ g.log.info("Server Quorum type is set as a server")
+
+ # Setting the server quorum ratio to 90
+ self.quorum_percent = {'cluster.server-quorum-ratio': '90%'}
+ ret = set_volume_options(self.mnode, 'all', self.quorum_percent)
+ self.assertTrue(ret, ("Failed to set the server quorum ratio "
+ "to 90 on servers"))
+ g.log.info("Successfully set server quorum ratio to 90% on servers")
+
+ # Stop glusterd on one of the node
+ ret = stop_glusterd(self.servers[2])
+ self.assertTrue(ret, ("Failed to stop glusterd on "
+ "node %s", self.servers[2]))
+ g.log.info("Glusterd stop on the nodes : %s"
+ " succeeded", self.servers[2])
+
+ # Check glusterd is stopped
+ ret = is_glusterd_running(self.servers[2])
+ self.assertEqual(ret, 1, "Unexpected: Glusterd is running on node")
+ g.log.info("Expected: Glusterd stopped on node %s", self.servers[2])
+
+ # Adding a new peer will fail as quorum not met
+ ret, _, _ = peer_probe(self.mnode, self.servers[3])
+ self.assertNotEqual(ret, 0, (
+ "Unexpected:"
+ "Succeeded to peer probe new node %s when quorum "
+ "is not met", self.servers[3]))
+ g.log.info("Failed to peer probe new node as expected"
+ " when quorum not met")
+
+ # Starting a stopped volume should fail as quorum is not met
+ ret, _, _ = volume_start(self.mnode, second_volume)
+ self.assertNotEqual(ret, 0, "Unexpected: Successfully started "
+ "volume even when quorum not met.")
+ g.log.info("Volume start %s failed as expected when quorum "
+ "is not met", second_volume)
+
+ # Stopping a started volume should fail; try stopping the first volume
+ ret, _, _ = volume_stop(self.mnode, self.volname)
+ self.assertEqual(ret, 1, "Unexpected: Successfully stopped"
+ " volume even when quorum is not met")
+ g.log.info("volume stop %s failed as expected when quorum "
+ "is not met", self.volname)
+
+ # Stopping a volume with force option should fail
+ ret, _, _ = volume_stop(self.mnode, self.volname, force=True)
+ self.assertNotEqual(ret, 0, "Unexpected: Successfully "
+ "stopped volume with force. Expected: "
+ "Volume stop should fail when quourm is not met")
+ g.log.info("volume stop failed as expected when quorum is not met")
+
+ # Deleting a volume should fail. Deleting the second volume.
+ ret = volume_delete(self.mnode, second_volume)
+ self.assertFalse(ret, "Unexpected: Volume delete was "
+ "successful even when quourm is not met")
+ g.log.info("volume delete failed as expected when quorum is not met")
+
+ # Volume reset should fail when quorum is not met
+ ret, _, _ = volume_reset(self.mnode, self.volname)
+ self.assertNotEqual(ret, 0, "Unexpected: Volume reset was "
+ "successful even when quorum is not met")
+ g.log.info("volume reset failed as expected when quorum is not met")
+
+ # Volume reset should fail even with force when quorum is not met
+ ret, _, _ = volume_reset(self.mnode, self.volname, force=True)
+ self.assertNotEqual(ret, 0, "Unexpected: Volume reset was "
+ "successful with force even "
+ "when quorum is not met")
+ g.log.info("volume reset failed as expected when quorum is not met")
+
+ # Start glusterd on the node where glusterd is stopped
+ ret = start_glusterd(self.servers[2])
+ self.assertTrue(ret, "Failed to start glusterd on one node")
+ g.log.info("Started glusterd on server"
+ " %s successfully", self.servers[2])
+
+ ret = is_glusterd_running(self.servers[2])
+ self.assertEqual(ret, 0, ("glusterd is not running on "
+ "node %s", self.servers[2]))
+ g.log.info("glusterd is running on node"
+ " %s ", self.servers[2])
+
+ # Check peer status to confirm all peers are in connected state and
+ # that none of the nodes is in peer rejected state
+ halt, counter, _rc = 30, 0, False
+ g.log.info("Wait for a few seconds; right after a restart, glusterd "
+ "spawns its daemon processes and needs a few seconds "
+ "(around 3-5) to initialize")
+ while counter < halt:
+ ret = is_peer_connected(self.mnode, self.servers[0:3])
+ if not ret:
+ g.log.info("Peers are not connected state,"
+ " Retry after 2 seconds .......")
+ sleep(2)
+ counter = counter + 2
+ else:
+ _rc = True
+ g.log.info("Peers are in connected state in the cluster")
+ break
+
+ self.assertTrue(_rc, ("Peers are not connected state after "
+ "bringing back glusterd online on the "
+ "nodes in which previously glusterd "
+ "had been stopped"))
+
+ # Check all bricks are online or wait for the bricks to be online
+ ret = wait_for_bricks_to_be_online(self.mnode, self.volname)
+ self.assertTrue(ret, "All bricks are not online")
+ g.log.info("All bricks of the volume %s are online", self.volname)
+
+ # Once quorum is met should be able to cleanup the volume
+ ret = volume_delete(self.mnode, second_volume)
+ self.assertTrue(ret, "Volume delete failed even when quorum is met")
+ g.log.info("volume delete succeed without any issues")
+
+ # Volume stop should succeed
+ ret, _, _ = volume_stop(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "Volume stop failed")
+ g.log.info("succeeded stopping the volume as expected")
+
+ # volume reset should succeed
+ ret, _, _ = volume_reset(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "Volume reset failed ")
+ g.log.info("volume reset succeeded as expected when quorum is not met")
+
+ # Peer probe new node should succeed
+ ret, _, _ = peer_probe(self.mnode, self.servers[3])
+ self.assertEqual(ret, 0, (
+ "Failed to peer probe new node even when quorum is met"))
+ g.log.info("Succeeded to peer probe new node when quorum met")
+
+ # Check peer status to confirm all peers are in connected state and
+ # that none of the nodes is in peer rejected state
+ halt, counter, _rc = 30, 0, False
+ g.log.info("Wait for some seconds, right after peer probe")
+ while counter < halt:
+ ret = is_peer_connected(self.mnode, self.servers[0:3])
+ if not ret:
+ g.log.info("Peers are not connected state,"
+ " Retry after 2 seconds .......")
+ sleep(2)
+ counter = counter + 2
+ else:
+ _rc = True
+ g.log.info("Peers are in connected state in the cluster")
+ break
+
+ self.assertTrue(_rc, ("Peers are not connected state"))
diff --git a/tests/functional/glusterd/test_glusterd_quorum_command.py b/tests/functional/glusterd/test_glusterd_quorum_command.py
new file mode 100644
index 000000000..034d626b3
--- /dev/null
+++ b/tests/functional/glusterd/test_glusterd_quorum_command.py
@@ -0,0 +1,104 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Description:
+ Test quorum CLI commands in glusterd
+"""
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.volume_ops import (
+ set_volume_options,
+ volume_reset,
+ get_volume_options)
+
+
+@runs_on([['replicated', 'arbiter', 'dispersed', 'distributed',
+ 'distributed-replicated', 'distributed-arbiter'],
+ ['glusterfs']])
+class TestGlusterDQuorumCLICommands(GlusterBaseClass):
+ """ Testing Quorum CLI commands in GlusterD """
+
+ def setUp(self):
+ """Setup Volume"""
+ # Calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+
+ # Setup and mount the volume
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to setup volume and mount it")
+
+ def set_and_check_vol_option(self, option_name, option_value,
+ for_all=False):
+ """ Function for setting and checking volume_options """
+ # Set the volume option
+ vol_option = {option_name: option_value}
+ if not for_all:
+ ret = set_volume_options(self.mnode, self.volname, vol_option)
+ else:
+ ret = set_volume_options(self.mnode, 'all', vol_option)
+ self.assertTrue(ret, "gluster volume option set of %s to %s failed"
+ % (option_name, option_value))
+
+ # Validate the option set
+ if not for_all:
+ ret = get_volume_options(self.mnode, self.volname, option_name)
+ else:
+ ret = get_volume_options(self.mnode, 'all', option_name)
+ self.assertIsNotNone(ret, "The %s option is not present" % option_name)
+ self.assertEqual(ret[option_name], option_value,
+ ("Volume option for %s is not equal to %s"
+ % (option_name, option_value)))
+ g.log.info("Volume option %s is equal to the expected value %s",
+ option_name, option_value)
+
+ def test_glusterd_quorum_cli_commands(self):
+ """
+ Test quorum CLI commands on glusterd
+ 1. Create a volume and start it.
+ 2. Set the quorum type to 'server' and verify it.
+ 3. Set the quorum type to 'none' and verify it.
+ 4. Set the quorum ratio and verify it.
+ """
+ # Set server quorum type to 'server' and validate it
+ self.set_and_check_vol_option('cluster.server-quorum-type', 'server')
+
+ # Set server quorum type to 'none' and validate it
+ self.set_and_check_vol_option('cluster.server-quorum-type', 'none')
+
+ # Set server quorum ratio to 90% and validate it
+ self.set_and_check_vol_option('cluster.server-quorum-ratio', '90%',
+ True)
+
+ def tearDown(self):
+ """tear Down Callback"""
+ # Unmount volume and cleanup.
+ ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to unmount and cleanup volume")
+ g.log.info("Successful in unmount and cleanup of volume")
+
+ # Reset the cluster options
+ ret, _, _ = volume_reset(self.mnode, "all")
+ if ret:
+ raise ExecutionError("Failed to reset the cluster options.")
+ g.log.info("Successfully reset cluster options.")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
diff --git a/tests/functional/glusterd/test_glusterd_replace_brick.py b/tests/functional/glusterd/test_glusterd_replace_brick.py
index eec182f55..ec415049d 100644
--- a/tests/functional/glusterd/test_glusterd_replace_brick.py
+++ b/tests/functional/glusterd/test_glusterd_replace_brick.py
@@ -31,7 +31,7 @@ from glustolibs.gluster.brick_libs import are_bricks_online
'distributed-dispersed'], ['glusterfs']])
class TestReplaceBrick(GlusterBaseClass):
def setUp(self):
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
self.test_method_complete = False
# Creating a volume and starting it
ret = setup_volume(self.mnode, self.all_servers_info, self.volume)
@@ -40,7 +40,7 @@ class TestReplaceBrick(GlusterBaseClass):
g.log.info("Volume created successfully")
def tearDown(self):
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'tearDown')()
self.test_method_complete = False
ret = self.cleanup_volume()
if not ret:
diff --git a/tests/functional/glusterd/test_glusterd_restart_quorum_not_met.py b/tests/functional/glusterd/test_glusterd_restart_quorum_not_met.py
new file mode 100644
index 000000000..6716f70f8
--- /dev/null
+++ b/tests/functional/glusterd/test_glusterd_restart_quorum_not_met.py
@@ -0,0 +1,125 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Description:
+ Test brick status when quorum isn't met after glusterd restart.
+"""
+
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.volume_ops import set_volume_options
+from glustolibs.gluster.volume_libs import (
+ wait_for_volume_process_to_be_online)
+from glustolibs.gluster.gluster_init import (
+ stop_glusterd,
+ start_glusterd,
+ restart_glusterd,
+ wait_for_glusterd_to_start)
+from glustolibs.gluster.brick_libs import (
+ are_bricks_offline,
+ get_all_bricks)
+
+
+@runs_on([['distributed', 'distributed-replicated', 'distributed-arbiter',
+ 'distributed-dispersed', 'replicated', 'dispersed', 'arbiter'],
+ ['glusterfs']])
+class TestBrickStatusQuorumNotMet(GlusterBaseClass):
+ def setUp(self):
+ """
+ setUp method for every test.
+ """
+ # Calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+
+ # Setup and mount the volume.
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to setup volume and mount it.")
+
+ def test_offline_brick_status_when_quorum_not_met(self):
+ """
+ Test Brick status when Quorum is not met after glusterd restart.
+ 1. Create a volume and mount it.
+ 2. Set the quorum type to 'server'.
+ 3. Bring some nodes down such that quorum won't be met.
+ 4. Brick status should be offline in the node which is up.
+ 5. Restart glusterd in this node.
+ 6. The bricks should still be offline as quorum isn't met.
+ """
+ # Set the quorum type to server and validate it.
+ vol_option = {'cluster.server-quorum-type': 'server'}
+ ret = set_volume_options(self.mnode, self.volname, vol_option)
+ self.assertTrue(ret, "gluster volume option set of %s to %s failed"
+ % ('cluster.server-quorum-type', 'server'))
+ g.log.info("Cluster quorum set to type server.")
+
+ # Get the brick list.
+ brick_list = get_all_bricks(self.mnode, self.volname)
+
+ # Stop glusterd processes.
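+ # Stopping glusterd on all servers but one leaves fewer than the
+ # required majority of nodes active, so server quorum is lost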
+ ret = stop_glusterd(self.servers[1:])
+ self.assertTrue(ret, "Failed to stop glusterd on specified nodes.")
+ g.log.info("Glusterd processes stopped in the desired servers.")
+
+ # Get the brick status in a node where glusterd is up.
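+ # When server quorum is lost, glusterd brings down the bricks even
+ # on the nodes that are still up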
+ ret = are_bricks_offline(self.mnode, self.volname, brick_list[0:1])
+ self.assertTrue(ret, "Bricks are online")
+ g.log.info("Bricks are offline as expected.")
+
+ # Restart glusterd on the node which is up.
+ ret = restart_glusterd(self.servers[0])
+ self.assertTrue(ret, ("Failed to restart glusterd on desired node."))
+ g.log.info("Glusterd restarted on the desired node.")
+
+ # Wait for glusterd to be online and validate it's running.
+ self.assertTrue(wait_for_glusterd_to_start(self.servers[0]),
+ "Glusterd not up on the desired server.")
+ g.log.info("Glusterd is up in the desired server.")
+
+ # Get the brick status from the restarted node.
+ ret = are_bricks_offline(self.mnode, self.volname, brick_list[0:1])
+ self.assertTrue(ret, "Bricks are online")
+ g.log.info("Bricks are offline as expected.")
+
+ # Start glusterd on all servers.
+ ret = start_glusterd(self.servers)
+ self.assertTrue(ret, "Failed to start glusterd on the specified nodes")
+ g.log.info("Initiated start of glusterd on all nodes.")
+
+ # Wait for glusterd to start.
+ ret = wait_for_glusterd_to_start(self.servers)
+ self.assertTrue(ret, "Glusterd not up on all nodes.")
+ g.log.info("Glusterd is up and running on all nodes.")
+
+ # Wait for all volume processes to be online
+ ret = wait_for_volume_process_to_be_online(self.mnode, self.volname,
+ timeout=600)
+ self.assertTrue(ret, ("All volume processes not up."))
+ g.log.info("All volume processes are up.")
+
+ def tearDown(self):
+ """tear Down callback"""
+ # unmount volume and cleanup.
+ ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to unmount and cleanup volume")
+ g.log.info("Successful in unmount and cleanup operations")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
diff --git a/tests/functional/glusterd/test_glusterd_selinux.py b/tests/functional/glusterd/test_glusterd_selinux.py
new file mode 100644
index 000000000..1790780bc
--- /dev/null
+++ b/tests/functional/glusterd/test_glusterd_selinux.py
@@ -0,0 +1,75 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+ Description:
+ Test cases in this module test Gluster against SELinux labels and policies
+"""
+
+import pytest
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass
+from glustolibs.gluster.glusterfile import file_exists
+
+
+class TestGlusterAgainstSELinux(GlusterBaseClass):
+ """Glusterd checks against SELinux Labels and Policies
+ """
+
+ @staticmethod
+ def run_cmd(host, cmd, opts='', operate_on=''):
+ if opts:
+ opts = '-'+opts
+ command = "{} {} {}".format(cmd, opts, operate_on)
+ rcode, rout, rerr = g.run(host, command)
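+ # g.run returns (return code, stdout, stderr); a return code of 0
+ # indicates success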
+ if not rcode:
+ return True, rout
+
+ g.log.error("On '%s', '%s' returned '%s'", host, command, rerr)
+ return False, rout
+
+ @pytest.mark.test_selinux_label
+ def test_selinux_label(self):
+ """
+ TestCase:
+ 1. Check the existence of '/usr/lib/firewalld/services/glusterfs.xml'
+ 2. Validate the owner of this file as 'glusterfs-server'
+ 3. Validate SELinux label context as 'system_u:object_r:lib_t:s0'
+ """
+
+ fqpath = '/usr/lib/firewalld/services/glusterfs.xml'
+
+ for server in self.all_servers_info:
+ # Check existence of xml file
+ self.assertTrue(file_exists(server, fqpath), "Failed to verify "
+ "existence of '{}' in {} ".format(fqpath, server))
+ g.log.info("Validated the existence of required xml file")
+
+ # Check owner of xml file
+ status, result = self.run_cmd(server, 'rpm', 'qf', fqpath)
+ self.assertTrue(status, "Fail: Not able to find owner for {} on "
+ "{}".format(fqpath, server))
+ exp_str = 'glusterfs-server'
+ self.assertIn(exp_str, result, "Fail: Owner of {} should be "
+ "{} on {}".format(fqpath, exp_str, server))
+
+ # Validate SELinux label
+ status, result = self.run_cmd(server, 'ls', 'lZ', fqpath)
+ self.assertTrue(status, "Fail: Not able to find SELinux label "
+ "for {} on {}".format(fqpath, server))
+ exp_str = 'system_u:object_r:lib_t:s0'
+ self.assertIn(exp_str, result, "Fail: SELinux label on {}"
+ "should be {} on {}".format(fqpath, exp_str, server))
diff --git a/tests/functional/glusterd/test_glusterd_set_reset_reserve_limit.py b/tests/functional/glusterd/test_glusterd_set_reset_reserve_limit.py
new file mode 100644
index 000000000..c3104f198
--- /dev/null
+++ b/tests/functional/glusterd/test_glusterd_set_reset_reserve_limit.py
@@ -0,0 +1,95 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Description:
+ Test set and reset of storage reserve limit in glusterd
+"""
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.volume_ops import (
+ set_volume_options,
+ reset_volume_option,
+ get_volume_options)
+
+
+@runs_on([['distributed', 'distributed-replicated', 'distributed-arbiter',
+ 'distributed-dispersed', 'replicated', 'arbiter', 'dispersed'],
+ ['glusterfs']])
+class TestGlusterDSetResetReserveLimit(GlusterBaseClass):
+ """ Testing set and reset of Reserve limit in GlusterD """
+
+ def setUp(self):
+ """Setup Volume"""
+ # Calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+
+ # Setup and mount the volume
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to setup volume and mount it")
+
+ def validate_vol_option(self, option_name, value_expected):
+ """ Function for validating volume options """
+ # Get the volume option.
+ ret = get_volume_options(self.mnode, self.volname, option_name)
+ self.assertIsNotNone(ret, "The %s option is not present" % option_name)
+ self.assertEqual(ret[option_name], value_expected,
+ ("Volume option for %s is not equal to %s"
+ % (option_name, value_expected)))
+ g.log.info("Volume option %s is equal to the expected value %s",
+ option_name, value_expected)
+
+ def test_glusterd_set_reset_reserve_limit(self):
+ """
+ Test set and reset of reserve limit on glusterd
+ 1. Create a volume and start it.
+ 2. Set storage.reserve limit on the created volume and verify it.
+ 3. Reset storage.reserve limit on the created volume and verify it.
+ """
+ # Setting storage.reserve to 50
+ ret = set_volume_options(self.mnode, self.volname,
+ {'storage.reserve': '50'})
+ self.assertTrue(ret, "Failed to set storage reserve on %s"
+ % self.mnode)
+
+ # Validate storage.reserve option set to 50
+ self.validate_vol_option('storage.reserve', '50')
+
+ # Resetting the storage.reserve limit
+ ret, _, _ = reset_volume_option(self.mnode, self.volname,
+ 'storage.reserve')
+ self.assertEqual(ret, 0, "Failed to reset the storage.reserve limit")
+
+ # Validate that the storage.reserve option is reset
+ ret = get_volume_options(self.mnode, self.volname, 'storage.reserve')
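+ # Depending on the gluster version, the reset value is reported
+ # either as '1' or as '1 (DEFAULT)'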
+ if ret['storage.reserve'] == '1':
+ self.validate_vol_option('storage.reserve', '1')
+ else:
+ self.validate_vol_option('storage.reserve', '1 (DEFAULT)')
+
+ def tearDown(self):
+ """tear Down Callback"""
+ # Unmount volume and cleanup.
+ ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to unmount and cleanup volume")
+ g.log.info("Successful in unmount and cleanup of volume")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
diff --git a/tests/functional/glusterd/test_glusterd_snap_info_on_detached_node.py b/tests/functional/glusterd/test_glusterd_snap_info_on_detached_node.py
index 8c0f7a7f2..7eeb5f93d 100644
--- a/tests/functional/glusterd/test_glusterd_snap_info_on_detached_node.py
+++ b/tests/functional/glusterd/test_glusterd_snap_info_on_detached_node.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -14,15 +14,17 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import random
-from time import sleep
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.glusterfile import file_exists
from glustolibs.gluster.lib_utils import form_bricks_list
from glustolibs.gluster.volume_ops import (volume_create,
set_volume_options, volume_start)
from glustolibs.gluster.snap_ops import snap_create, snap_activate
-from glustolibs.gluster.peer_ops import peer_detach_servers, peer_probe
+from glustolibs.gluster.peer_ops import (
+ peer_detach_servers,
+ peer_probe_servers)
@runs_on([['distributed'], ['glusterfs']])
@@ -30,6 +32,10 @@ class TestSnapInfoOnPeerDetachedNode(GlusterBaseClass):
def tearDown(self):
+ ret = peer_probe_servers(self.mnode, self.servers)
+ if not ret:
+ raise ExecutionError("Failed to peer probe servers")
+
# stopping the volume and Cleaning up the volume
ret = self.cleanup_volume()
if not ret:
@@ -37,7 +43,7 @@ class TestSnapInfoOnPeerDetachedNode(GlusterBaseClass):
g.log.info("Volume deleted successfully : %s", self.volname)
# Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_snap_info_from_detached_node(self):
# pylint: disable=too-many-statements
@@ -100,12 +106,10 @@ class TestSnapInfoOnPeerDetachedNode(GlusterBaseClass):
# Validate files /var/lib/glusterd/snaps on all the servers is same
self.pathname = "/var/lib/glusterd/snaps/%s" % self.snapname
for server in self.servers:
- conn = g.rpyc_get_connection(server)
- ret = conn.modules.os.path.isdir(self.pathname)
+ ret = file_exists(server, self.pathname)
self.assertTrue(ret, "%s directory doesn't exist on node %s" %
(self.pathname, server))
g.log.info("%s path exists on node %s", self.pathname, server)
- g.rpyc_close_deployed_servers()
# Peer detach one node
self.random_node_peer_detach = random.choice(self.servers[1:])
@@ -116,28 +120,10 @@ class TestSnapInfoOnPeerDetachedNode(GlusterBaseClass):
g.log.info("Peer detach succeeded")
# /var/lib/glusterd/snaps/<snapname> directory should not present
- conn = g.rpyc_get_connection(self.random_node_peer_detach)
- ret = conn.modules.os.path.isdir(self.pathname)
+
+ ret = file_exists(self.random_node_peer_detach, self.pathname)
self.assertFalse(ret, "%s directory should not exist on the peer"
"which is detached from cluster%s" % (
self.pathname, self.random_node_peer_detach))
g.log.info("Expected: %s path doesn't exist on peer detached node %s",
self.pathname, self.random_node_peer_detach)
- g.rpyc_close_deployed_servers()
-
- # Peer probe the detached node
- ret, _, _ = peer_probe(self.mnode, self.random_node_peer_detach)
- self.assertEqual(ret, 0, "Peer probe of node: %s failed" %
- self.random_node_peer_detach)
- g.log.info("Peer probe succeeded")
-
- # Validating peers are in connected state
- count = 0
- while count < 10:
- sleep(2)
- ret = self.validate_peers_are_connected()
- if ret:
- break
- count += 1
- self.assertTrue(ret, "Peers are not in connected state")
- g.log.info("Peer are in connected state")
diff --git a/tests/functional/glusterd/test_glusterd_split_brain.py b/tests/functional/glusterd/test_glusterd_split_brain.py
index ad3d05213..c8954da26 100644
--- a/tests/functional/glusterd/test_glusterd_split_brain.py
+++ b/tests/functional/glusterd/test_glusterd_split_brain.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -14,11 +14,11 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-import time
+from time import sleep
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
-from glustolibs.gluster.peer_ops import is_peer_connected
+from glustolibs.gluster.peer_ops import is_peer_connected, peer_probe_servers
from glustolibs.gluster.volume_libs import (cleanup_volume,
setup_volume)
from glustolibs.gluster.volume_ops import (get_volume_list,
@@ -32,7 +32,7 @@ from glustolibs.gluster.gluster_init import (is_glusterd_running,
class GlusterdSplitBrainQuorumValidation(GlusterBaseClass):
def setUp(self):
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
# Overriding the volume type to specifically test the volume type
if self.volume_type == "distributed-replicated":
@@ -58,7 +58,25 @@ class GlusterdSplitBrainQuorumValidation(GlusterBaseClass):
def tearDown(self):
# stopping the volume and Cleaning up the volume
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
+ ret = is_glusterd_running(self.servers)
+ if ret:
+ ret = start_glusterd(self.servers)
+ if not ret:
+ raise ExecutionError("Failed to start glusterd on %s"
+ % self.servers)
+ # Takes 5 seconds to restart glusterd into peer connected state
+ sleep(5)
+ g.log.info("Glusterd started successfully on %s", self.servers)
+
+ # checking for peer status from every node
+ ret = is_peer_connected(self.mnode, self.servers)
+ if not ret:
+ ret = peer_probe_servers(self.mnode, self.servers)
+ if not ret:
+ raise ExecutionError("Failed to peer probe failed in "
+ "servers %s" % self.servers)
+ g.log.info("All peers are in connected state")
vol_list = get_volume_list(self.mnode)
if vol_list is None:
raise ExecutionError("Failed to get the volume list")
@@ -138,7 +156,7 @@ class GlusterdSplitBrainQuorumValidation(GlusterBaseClass):
if not ret:
g.log.info("Peers are not connected state,"
" Retry after 2 seconds .......")
- time.sleep(2)
+ sleep(2)
counter = counter + 2
else:
_rc = True
diff --git a/tests/functional/glusterd/test_host_uuid_in_volume_info_xml.py b/tests/functional/glusterd/test_host_uuid_in_volume_info_xml.py
index b5ed75e16..7b7ae20e6 100644
--- a/tests/functional/glusterd/test_host_uuid_in_volume_info_xml.py
+++ b/tests/functional/glusterd/test_host_uuid_in_volume_info_xml.py
@@ -29,7 +29,7 @@ class TestUUID(GlusterBaseClass):
def setUp(self):
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
# check whether peers are in connected state
ret = self.validate_peers_are_connected()
@@ -60,7 +60,7 @@ class TestUUID(GlusterBaseClass):
if not ret:
raise ExecutionError("Failed to probe detached "
"servers %s" % self.servers)
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_uuid_in_volume_info_xml(self):
diff --git a/tests/functional/glusterd/test_lower_gluster_op_version.py b/tests/functional/glusterd/test_lower_gluster_op_version.py
index cfd2c00fa..5efc5d7b0 100644
--- a/tests/functional/glusterd/test_lower_gluster_op_version.py
+++ b/tests/functional/glusterd/test_lower_gluster_op_version.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -22,8 +22,8 @@ from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.volume_libs import cleanup_volume
-from glustolibs.gluster.volume_libs import (get_volume_options,
- set_volume_options)
+from glustolibs.gluster.volume_ops import (get_volume_options,
+ set_volume_options)
@runs_on([['replicated'], ['glusterfs']])
@@ -32,7 +32,7 @@ class LowerGlusterOpVersion(GlusterBaseClass):
def setUp(self):
# calling GlusterBaseClass setUp
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
# Creating Volume
g.log.info("Started creating volume")
ret = self.setup_volume()
@@ -43,7 +43,7 @@ class LowerGlusterOpVersion(GlusterBaseClass):
def tearDown(self):
# Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
# stopping the volume and Cleaning up the volume
ret = cleanup_volume(self.mnode, self.volname)
if not ret:
diff --git a/tests/functional/glusterd/test_mount_after_removing_client_logs_dir.py b/tests/functional/glusterd/test_mount_after_removing_client_logs_dir.py
index 491e9abd5..886a441e4 100644
--- a/tests/functional/glusterd/test_mount_after_removing_client_logs_dir.py
+++ b/tests/functional/glusterd/test_mount_after_removing_client_logs_dir.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2019 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2019-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -21,7 +21,7 @@
from glusto.core import Glusto as g
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.exceptions import ExecutionError
-from glustolibs.gluster.glusterdir import (mkdir, get_dir_contents)
+from glustolibs.gluster.glusterdir import (get_dir_contents, mkdir)
from glustolibs.gluster.mount_ops import mount_volume, umount_volume
@@ -30,7 +30,7 @@ from glustolibs.gluster.mount_ops import mount_volume, umount_volume
class TestRemoveCientLogDirAndMount(GlusterBaseClass):
def setUp(self):
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
# Creating Volume and mounting volume.
ret = self.setup_volume(self.mounts)
@@ -75,7 +75,7 @@ class TestRemoveCientLogDirAndMount(GlusterBaseClass):
% self.volname)
g.log.info("Volume deleted successfully : %s", self.volname)
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_mount_after_removing_client_logs_dir(self):
diff --git a/tests/functional/glusterd/test_mountpoint_ownership_post_volume_restart.py b/tests/functional/glusterd/test_mountpoint_ownership_post_volume_restart.py
new file mode 100644
index 000000000..5a01d860f
--- /dev/null
+++ b/tests/functional/glusterd/test_mountpoint_ownership_post_volume_restart.py
@@ -0,0 +1,109 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+""" Description:
+ Test mount point ownership persistence post volume restart.
+"""
+
+from time import sleep
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.glusterfile import (
+ get_file_stat,
+ set_file_permissions)
+from glustolibs.gluster.volume_ops import (
+ volume_stop,
+ volume_start)
+from glustolibs.gluster.volume_libs import wait_for_volume_process_to_be_online
+
+
+@runs_on([['arbiter', 'distributed', 'replicated', 'distributed-replicated',
+ 'dispersed', 'distributed-dispersed', 'distributed-arbiter'],
+ ['glusterfs']])
+class TestMountPointOwnershipPostVolumeRestart(GlusterBaseClass):
+ """ Test mount point ownership persistence post volume restart """
+
+ def setUp(self):
+ """Setup Volume"""
+ # Calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+
+ # Setup and mount the volume
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to setup volume and mount it")
+ self.client = self.mounts[0].client_system
+ self.mountpoint = self.mounts[0].mountpoint
+
+ def validate_mount_permissions(self):
+ """
+ Verify the mount permissions
+ """
+ stat_mountpoint_dict = get_file_stat(self.client,
+ self.mounts[0].mountpoint)
+ self.assertEqual(stat_mountpoint_dict['access'], '777', "Expected 777"
+ " but found %s" % stat_mountpoint_dict['access'])
+ g.log.info("Mountpoint permissions are 777, as expected.")
+
+ def test_mountpoint_ownership_post_volume_restart(self):
+ """
+ Test mountpoint ownership post volume restart
+ 1. Create a volume and mount it on client.
+ 2. Set ownership permissions and validate them.
+ 3. Restart volume.
+ 4. Ownership permissions should persist.
+ """
+ # Set full permissions on the mountpoint.
+ ret = set_file_permissions(self.clients[0], self.mountpoint,
+ "-R 777")
+ self.assertTrue(ret, "Failed to set permissions on the mountpoint")
+ g.log.info("Set full permissions on the mountpoint.")
+
+ # Validate the permissions set.
+ self.validate_mount_permissions()
+
+ # Stop the volume.
+ ret = volume_stop(self.mnode, self.volname)
+ self.assertTrue(ret, ("Failed to stop volume %s" % self.volname))
+ g.log.info("Successful in stopping volume.")
+
+ # Start the volume.
+ ret = volume_start(self.mnode, self.volname)
+ self.assertTrue(ret, ("Failed to start volume %s" % self.volname))
+ g.log.info("Successful in starting volume.")
+
+ # Wait for all volume processes to be up and running.
+ ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
+ self.assertTrue(ret, ("All volume processes are not up"))
+ g.log.info("All volume processes are up and running.")
+
+ # Adding sleep for the mount to be recognized by client.
+ sleep(3)
+
+ # Validate the mountpoint permissions.
+ self.validate_mount_permissions()
+
+ def tearDown(self):
+ """tearDown callback"""
+ # Unmount volume and cleanup.
+ ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to unmount and cleanup volume")
+ g.log.info("Successful in unmount and cleanup of volume")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
diff --git a/tests/functional/glusterd/test_nfs_quorum.py b/tests/functional/glusterd/test_nfs_quorum.py
index 62d2ce24a..62885198b 100644
--- a/tests/functional/glusterd/test_nfs_quorum.py
+++ b/tests/functional/glusterd/test_nfs_quorum.py
@@ -31,7 +31,7 @@ class TestNfsMountAndServerQuorumSettings(GlusterBaseClass):
@classmethod
def setUpClass(cls):
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
g.log.info("Starting %s ", cls.__name__)
# checking for peer status from every node
@@ -44,7 +44,7 @@ class TestNfsMountAndServerQuorumSettings(GlusterBaseClass):
setUp method for every test
"""
# calling GlusterBaseClass setUp
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
# Creating Volume
g.log.info("Started creating volume")
@@ -64,7 +64,7 @@ class TestNfsMountAndServerQuorumSettings(GlusterBaseClass):
g.log.info("Volume deleted successfully : %s", self.volname)
# Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_nfs_mount_quorum_settings(self):
"""
diff --git a/tests/functional/glusterd/test_op_version.py b/tests/functional/glusterd/test_op_version.py
index 0b0cf5b7f..95735bec9 100644
--- a/tests/functional/glusterd/test_op_version.py
+++ b/tests/functional/glusterd/test_op_version.py
@@ -22,6 +22,7 @@
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.glusterfile import file_exists
from glustolibs.gluster.volume_ops import (get_volume_options,
set_volume_options)
@@ -34,7 +35,7 @@ class TestMaxSupportedOpVersion(GlusterBaseClass):
setUp method for every test
"""
# calling GlusterBaseClass setUp
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
# Creating Volume
g.log.info("Started creating volume")
@@ -52,7 +53,7 @@ class TestMaxSupportedOpVersion(GlusterBaseClass):
raise ExecutionError("Failed Cleanup the Volume %s" % self.volname)
# Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_op_version(self):
'''
@@ -81,11 +82,9 @@ class TestMaxSupportedOpVersion(GlusterBaseClass):
# Checking vol file exist in all servers or not
file_path = '/var/lib/glusterd/vols/' + self.volname + '/info'
for server in self.servers:
- conn = g.rpyc_get_connection(server)
- ret = conn.modules.os.path.isfile(file_path)
+ ret = file_exists(server, file_path)
self.assertTrue(ret, "Vol file not found in server %s" % server)
g.log.info("vol file found in server %s", server)
- g.rpyc_close_deployed_servers()
# Getting version number from vol info file
# cmd: grepping version from vol info file
diff --git a/tests/functional/glusterd/test_ops_when_one_node_is_down.py b/tests/functional/glusterd/test_ops_when_one_node_is_down.py
index a18f572b0..c304f1dbd 100644
--- a/tests/functional/glusterd/test_ops_when_one_node_is_down.py
+++ b/tests/functional/glusterd/test_ops_when_one_node_is_down.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2019 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2019-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -14,13 +14,13 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-from time import sleep
from random import randint
from glusto.core import Glusto as g
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.exceptions import ExecutionError
-from glustolibs.gluster.gluster_init import stop_glusterd, start_glusterd
-from glustolibs.gluster.peer_ops import peer_status, is_peer_connected
+from glustolibs.gluster.gluster_init import (
+ start_glusterd, stop_glusterd, wait_for_glusterd_to_start)
+from glustolibs.gluster.peer_ops import peer_status, wait_for_peers_to_connect
from glustolibs.gluster.volume_ops import volume_list, volume_info
from glustolibs.gluster.volume_libs import (cleanup_volume, setup_volume)
@@ -30,7 +30,7 @@ class TestOpsWhenOneNodeIsDown(GlusterBaseClass):
def setUp(self):
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
# Create and start a volume.
ret = setup_volume(self.mnode, self.all_servers_info, self.volume)
@@ -45,14 +45,13 @@ class TestOpsWhenOneNodeIsDown(GlusterBaseClass):
ExecutionError("Failed to start glusterd.")
g.log.info("Successfully started glusterd.")
+ ret = wait_for_glusterd_to_start(self.servers)
+ if not ret:
+ ExecutionError("glusterd is not running on %s" % self.servers)
+ g.log.info("Glusterd start on the nodes succeeded")
+
# Checking if peer is connected.
- counter = 0
- while counter < 30:
- ret = is_peer_connected(self.mnode, self.servers)
- counter += 1
- if ret:
- break
- sleep(3)
+ ret = wait_for_peers_to_connect(self.mnode, self.servers)
if not ret:
ExecutionError("Peer is not in connected state.")
g.log.info("Peers is in connected state.")
@@ -63,7 +62,7 @@ class TestOpsWhenOneNodeIsDown(GlusterBaseClass):
raise ExecutionError("Unable to delete volume % s" % self.volname)
g.log.info("Volume deleted successfully : %s", self.volname)
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_ops_when_one_node_is_down(self):
diff --git a/tests/functional/glusterd/test_peer_detach.py b/tests/functional/glusterd/test_peer_detach.py
index db58607b8..8b62be868 100644
--- a/tests/functional/glusterd/test_peer_detach.py
+++ b/tests/functional/glusterd/test_peer_detach.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -23,17 +23,17 @@ from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.peer_ops import peer_detach
from glustolibs.gluster.peer_ops import peer_probe_servers
from glustolibs.gluster.lib_utils import is_core_file_created
+from glustolibs.gluster.volume_ops import volume_stop, volume_start
-@runs_on([['distributed', 'replicated', 'distributed-replicated',
- 'dispersed', 'distributed-dispersed'], ['glusterfs']])
+@runs_on([['replicated', 'distributed-dispersed'], ['glusterfs']])
class PeerDetachVerification(GlusterBaseClass):
"""
Test that peer detach works as expected
"""
@classmethod
def setUpClass(cls):
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# checking for peer status from every node
ret = cls.validate_peers_are_connected()
@@ -43,23 +43,52 @@ class PeerDetachVerification(GlusterBaseClass):
g.log.info("All server peers are already in connected state "
"%s:", cls.servers)
- @classmethod
- def tearDownClass(cls):
+ def tearDown(self):
+
+ ret = peer_probe_servers(self.mnode, self.servers)
+ if not ret:
+ raise ExecutionError("Failed to peer probe servers")
+
# stopping the volume and Cleaning up the volume
- ret = cls.cleanup_volume()
- if ret:
- g.log.info("Volume deleted successfully : %s", cls.volname)
- else:
- raise ExecutionError("Failed Cleanup the Volume %s" % cls.volname)
+ ret = self.cleanup_volume()
+ if not ret:
+ raise ExecutionError("Failed Cleanup the Volume %s" % self.volname)
+ g.log.info("Volume deleted successfully : %s", self.volname)
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ # A local function to detach peer when volume exists
+ def check_detach_error_message(self, use_force=True):
+ ret, _, err = peer_detach(self.mnode, self.servers[1],
+ force=use_force)
+ self.assertNotEqual(ret, 0, "detach server should fail: %s"
+ % self.servers[1])
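+ # The wording of this error differs across gluster versions, so
+ # accept either of the two known forms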
+ msg = ('peer detach: failed: Brick(s) with the peer ' +
+ self.servers[1] + ' ' + 'exist in cluster')
+ if msg not in err:
+ msg = ('peer detach: failed: Peer ' + self.servers[1] +
+ ' hosts one or more bricks. ' +
+ 'If the peer is in not recoverable ' +
+ 'state then use either ' +
+ 'replace-brick or remove-brick command ' +
+ 'with force to remove ' +
+ 'all bricks from the peer and ' +
+ 'attempt the peer detach again.')
+ self.assertIn(msg, err, "Peer detach not failed with "
+ "proper error message")
def test_peer_detach_host(self):
+ # pylint: disable = too-many-statements
# peer Detaching specified server from cluster
- # peer Detaching detached server again
+ # peer Detaching detached server again and checking the error msg
# peer Detaching invalid host
# peer Detaching Non exist host
# peer Checking Core file created or not
# Peer detach one node which contains the bricks of volume created
# Peer detach force a node which is hosting bricks of a volume
+ # Peer detach one node which hosts bricks of offline volume
+ # Peer detach force a node which hosts bricks of offline volume
# Timestamp of current test case of start time
ret, test_timestamp, _ = g.run_local('date +%s')
@@ -80,9 +109,12 @@ class PeerDetachVerification(GlusterBaseClass):
# Detached server detaching again, Expected to fail detach
g.log.info("Start detached server detaching "
"again : %s", self.servers[1])
- ret, _, _ = peer_detach(self.mnode, self.servers[1])
+ ret, _, err = peer_detach(self.mnode, self.servers[1])
self.assertNotEqual(ret, 0, "Detach server should "
"fail :%s" % self.servers[1])
+ self.assertEqual(err, "peer detach: failed: %s is not part of "
+ "cluster\n" % self.servers[1], "Peer "
+ "Detach didn't fail as expected")
# Probing detached server
g.log.info("Start probing detached server : %s", self.servers[1])
@@ -102,13 +134,6 @@ class PeerDetachVerification(GlusterBaseClass):
self.assertNotEqual(ret, 0, "Detach non existing host "
"should fail :%s" % self.non_exist_host)
- # Chekcing core. file created or not in "/", "/tmp", "/log/var/core
- # directory
- ret = is_core_file_created(self.servers, test_timestamp)
- self.assertTrue(ret, "glusterd service should not crash")
- g.log.info("No core file found, glusterd service running "
- "successfully")
-
# Creating Volume
g.log.info("Started creating volume: %s", self.volname)
ret = self.setup_volume()
@@ -117,39 +142,37 @@ class PeerDetachVerification(GlusterBaseClass):
# Peer detach one node which contains the bricks of the volume created
g.log.info("Start detaching server %s which is hosting "
"bricks of a volume", self.servers[1])
- ret, _, err = peer_detach(self.mnode, self.servers[1])
- self.assertNotEqual(ret, 0, "detach server should fail: %s"
- % self.servers[1])
- msg = ('peer detach: failed: Brick(s) with the peer ' +
- self.servers[1] + ' ' + 'exist in cluster')
- if msg not in err:
- msg = ('peer detach: failed: Peer ' + self.servers[1] +
- ' hosts one or more bricks. ' +
- 'If the peer is in not recoverable ' +
- 'state then use either ' +
- 'replace-brick or remove-brick command ' +
- 'with force to remove ' +
- 'all bricks from the peer and ' +
- 'attempt the peer detach again.')
- self.assertIn(msg, err, "Peer detach not failed with "
- "proper error message")
+ self.check_detach_error_message(use_force=False)
# Peer detach force a node which is hosting bricks of a volume
+ g.log.info("Start detaching server using force %s which is hosting "
+ "bricks of a volume", self.servers[1])
+ self.check_detach_error_message()
+
+ # Peer detach one node which contains bricks of an offline volume
+ g.log.info("stopping the volume")
+ ret, _, err = volume_stop(self.mnode, self.volname)
+ msg = ('volume stop: ' + self.volname + ': failed: Volume ' +
+ self.volname + ' is not in the started state\n')
+ if msg not in err:
+ self.assertEqual(ret, 0, "stopping volume %s failed"
+ % self.volname)
+ g.log.info("Start to detach server %s which is hosting "
+ "bricks of an offline volume", self.servers[1])
+ self.check_detach_error_message(use_force=False)
+
+ # Forceful Peer detach node which hosts bricks of offline volume
g.log.info("start detaching server %s with force option "
"which is hosting bricks of a volume", self.servers[1])
- ret, _, err = peer_detach(self.mnode, self.servers[1], force=True)
- self.assertNotEqual(ret, 0, "detach server should fail with force "
- "option : %s" % self.servers[1])
- msg = ('peer detach: failed: Brick(s) with the peer ' +
- self.servers[1] + ' ' + 'exist in cluster')
- if msg not in err:
- msg = ('peer detach: failed: Peer ' + self.servers[1] +
- ' hosts one or more bricks. ' +
- 'If the peer is in not recoverable ' +
- 'state then use either ' +
- 'replace-brick or remove-brick command ' +
- 'with force to remove ' +
- 'all bricks from the peer and ' +
- 'attempt the peer detach again.')
- self.assertIn(msg, err, "Peer detach not failed with "
- "proper error message")
+ self.check_detach_error_message()
+
+ # starting volume for proper cleanup
+ ret, _, _ = volume_start(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "volume start failed")
+
+ # Checking whether a core file was created in "/", "/tmp" or
+ # "/var/log/core" directory
+ ret = is_core_file_created(self.servers, test_timestamp)
+ self.assertTrue(ret, "glusterd service should not crash")
+ g.log.info("No core file found, glusterd service running "
+ "successfully")
diff --git a/tests/functional/glusterd/test_peer_detach_check_warning_message.py b/tests/functional/glusterd/test_peer_detach_check_warning_message.py
index 4469aa17f..96606e083 100644
--- a/tests/functional/glusterd/test_peer_detach_check_warning_message.py
+++ b/tests/functional/glusterd/test_peer_detach_check_warning_message.py
@@ -31,7 +31,7 @@ class TestPeerDetachWarningMessage(GlusterBaseClass):
raise ExecutionError("Failed to detach %s" % self.servers[1])
g.log.info("Peer detach successful %s", self.servers[1])
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_peer_detach_check_warning_message(self):
# pylint: disable=too-many-statements
diff --git a/tests/functional/glusterd/test_peer_probe.py b/tests/functional/glusterd/test_peer_probe.py
index 1db7b352e..3a1d01806 100644
--- a/tests/functional/glusterd/test_peer_probe.py
+++ b/tests/functional/glusterd/test_peer_probe.py
@@ -40,7 +40,7 @@ class TestPeerProbe(GlusterBaseClass):
if ret != 0:
raise ExecutionError("Peer detach failed")
g.log.info("Peer detach SUCCESSFUL.")
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
def tearDown(self):
"""
@@ -64,7 +64,7 @@ class TestPeerProbe(GlusterBaseClass):
"servers %s" % self.servers)
g.log.info("Peer probe success for detached "
"servers %s", self.servers)
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_peer_probe(self):
"""
diff --git a/tests/functional/glusterd/test_peer_probe_after_setting_global_options.py b/tests/functional/glusterd/test_peer_probe_after_setting_global_options.py
index f9083b7bb..6d3259dc9 100644
--- a/tests/functional/glusterd/test_peer_probe_after_setting_global_options.py
+++ b/tests/functional/glusterd/test_peer_probe_after_setting_global_options.py
@@ -34,7 +34,7 @@ class TestPeerProbeAfterSettingGlobalOptions(GlusterBaseClass):
setUp method for every test
"""
# calling GlusterBaseClass setUp
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
# Creating Volume
ret = self.setup_volume()
@@ -57,7 +57,7 @@ class TestPeerProbeAfterSettingGlobalOptions(GlusterBaseClass):
raise ExecutionError("Failed Cleanup the Volume %s" % self.volname)
# Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_peer_probe_global_options(self):
'''
diff --git a/tests/functional/glusterd/test_peer_probe_firewall_ports_not_opened.py b/tests/functional/glusterd/test_peer_probe_firewall_ports_not_opened.py
new file mode 100644
index 000000000..8c0920c9e
--- /dev/null
+++ b/tests/functional/glusterd/test_peer_probe_firewall_ports_not_opened.py
@@ -0,0 +1,140 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from random import choice
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass
+from glustolibs.gluster.peer_ops import (peer_probe, peer_detach,
+ peer_probe_servers,
+ nodes_from_pool_list)
+from glustolibs.gluster.lib_utils import is_core_file_created
+from glustolibs.gluster.exceptions import ExecutionError
+
+
+class TestPeerProbeWithFirewallNotOpened(GlusterBaseClass):
+
+ def setUp(self):
+ # Performing peer detach
+ for server in self.servers[1:]:
+ ret, _, _ = peer_detach(self.mnode, server)
+ if ret:
+ raise ExecutionError("Peer detach failed")
+ g.log.info("Peer detach SUCCESSFUL.")
+ self.get_super_method(self, 'setUp')()
+ self.node_to_probe = choice(self.servers[1:])
+
+ def tearDown(self):
+ # Re-add the removed services to the firewall
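+ # Each service is added both to the running configuration and
+ # permanently (--permanent)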
+ for service in ('glusterfs', 'rpc-bind'):
+ for option in ("", " --permanent"):
+ cmd = ("firewall-cmd --zone=public --add-service={}{}"
+ .format(service, option))
+ ret, _, _ = g.run(self.node_to_probe, cmd)
+ if ret:
+ raise ExecutionError("Failed to add firewall service %s "
+ "on %s" % (service,
+ self.node_to_probe))
+
+ # Detach servers from cluster
+ pool = nodes_from_pool_list(self.mnode)
+ self.assertIsNotNone(pool, "Failed to get pool list")
+ for node in pool:
+ ret, _, _ = peer_detach(self.mnode, node)
+ if ret:
+ raise ExecutionError("Failed to detach %s from %s"
+ % (node, self.mnode))
+ # Create a cluster
+ if not peer_probe_servers(self.mnode, self.servers):
+ raise ExecutionError("Failed to probe peer "
+ "servers %s" % self.servers)
+ g.log.info("Peer probe success for detached "
+ "servers %s", self.servers)
+
+ self.get_super_method(self, 'tearDown')()
+
+ def _remove_firewall_service(self):
+ """ Remove glusterfs and rpc-bind services from firewall"""
+ for service in ['glusterfs', 'rpc-bind']:
+ for option in ("", " --permanent"):
+ cmd = ("firewall-cmd --zone=public --remove-service={}{}"
+ .format(service, option))
+ ret, _, _ = g.run(self.node_to_probe, cmd)
+ self.assertEqual(ret, 0, ("Failed to bring down service {} on"
+ " node {}"
+ .format(service,
+ self.node_to_probe)))
+ g.log.info("Successfully removed glusterfs and rpc-bind services")
+
+ def _get_test_specific_glusterd_log(self, node):
+ """Gets the test specific glusterd log"""
+ # Extract the test specific section from the glusterd log
+ start_msg = "Starting Test : %s : %s" % (self.id(),
+ self.glustotest_run_id)
+ end_msg = "Ending Test: %s : %s" % (self.id(),
+ self.glustotest_run_id)
+ glusterd_log = "/var/log/glusterfs/glusterd.log"
+ cmd = ("awk '/{}/ {{p=1}}; p; /{}/ {{p=0}}' {}"
+ .format(start_msg, end_msg, glusterd_log))
+ ret, test_specific_glusterd_log, err = g.run(node, cmd)
+ self.assertEqual(ret, 0, "Failed to extract glusterd log specific"
+ " to the current test case. "
+ "Error : %s" % err)
+ return test_specific_glusterd_log
+
+ def test_verify_peer_probe_with_firewall_ports_not_opened(self):
+ """
+ Test Steps:
+ 1. Open glusterd port only in Node1 using firewall-cmd command
+ 2. Perform peer probe to Node2 from Node 1
+ 3. Verify glusterd.log for Errors
+ 4. Check for core files created
+ """
+
+ ret, test_timestamp, _ = g.run_local('date +%s')
+ test_timestamp = test_timestamp.strip()
+
+ # Remove firewall service on the node to probe to
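+ # With the glusterfs and rpc-bind services removed, glusterd on that
+ # node is unreachable and the probe should fail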
+ self._remove_firewall_service()
+
+ # Try peer probe from mnode to node
+ ret, _, err = peer_probe(self.mnode, self.node_to_probe)
+ self.assertEqual(ret, 1, ("Unexpected behavior: Peer probe should"
+ " fail when the firewall services are "
+ "down but returned success"))
+
+ expected_err = ('peer probe: failed: Probe returned with '
+ 'Transport endpoint is not connected\n')
+ self.assertEqual(err, expected_err,
+ "Expected error {}, but returned {}"
+ .format(expected_err, err))
+ msg = ("Peer probe of {} from {} failed as expected "
+ .format(self.mnode, self.node_to_probe))
+ g.log.info(msg)
+
+ # Verify there are no glusterd crashes
+ status = True
+ glusterd_logs = (self._get_test_specific_glusterd_log(self.mnode)
+ .split("\n"))
+ for line in glusterd_logs:
+ if ' E ' in line:
+ status = False
+ g.log.info("Error found: ' %s '", line)
+
+ self.assertTrue(status, "Errors found in glusterd logs")
+
+ # Verify no core files are created
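+ # Note: despite its name, is_core_file_created returns True when
+ # no new core files are found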
+ ret = is_core_file_created(self.servers, test_timestamp)
+ self.assertTrue(ret, "Unexpected crash found.")
+ g.log.info("No core file found as expected")
diff --git a/tests/functional/glusterd/test_peer_probe_while_snapd_running.py b/tests/functional/glusterd/test_peer_probe_while_snapd_running.py
index aff015638..9e059b033 100644
--- a/tests/functional/glusterd/test_peer_probe_while_snapd_running.py
+++ b/tests/functional/glusterd/test_peer_probe_while_snapd_running.py
@@ -50,7 +50,7 @@ class TestPeerProbeWhileSnapdRunning(GlusterBaseClass):
raise ExecutionError("Failed to probe detached servers %s"
% self.servers)
# Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_peer_probe_snapd_running(self):
diff --git a/tests/functional/glusterd/test_peer_status.py b/tests/functional/glusterd/test_peer_status.py
index cc282de57..65e8db9ed 100644
--- a/tests/functional/glusterd/test_peer_status.py
+++ b/tests/functional/glusterd/test_peer_status.py
@@ -34,8 +34,7 @@ from glustolibs.gluster.peer_ops import (peer_probe, peer_status, peer_detach,
class TestPeerStatus(GlusterBaseClass):
def setUp(self):
-
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
# Performing peer detach
ret = peer_detach_servers(self.mnode, self.servers)
@@ -67,7 +66,7 @@ class TestPeerStatus(GlusterBaseClass):
"servers %s" % self.servers)
g.log.info("Peer probe success for detached "
"servers %s", self.servers)
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_peer_probe_status(self):
diff --git a/tests/functional/glusterd/test_probe_glusterd.py b/tests/functional/glusterd/test_probe_glusterd.py
index 54b99eec2..7403727e3 100644
--- a/tests/functional/glusterd/test_probe_glusterd.py
+++ b/tests/functional/glusterd/test_probe_glusterd.py
@@ -26,24 +26,6 @@ from glustolibs.gluster.gluster_init import is_glusterd_running
class PeerProbeInvalidIpNonExistingHost(GlusterBaseClass):
- @classmethod
- def setUpClass(cls):
- GlusterBaseClass.setUpClass.im_func(cls)
- g.log.info("Starting %s ", cls.__name__)
-
- def setUp(self):
- """
- setUp method for every test
- """
- # calling GlusterBaseClass setUp
- GlusterBaseClass.setUp.im_func(self)
-
- def tearDown(self):
- """
- tearDown for every test
- """
- # Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
def test_peer_probe_invalid_ip_nonexist_host_nonexist_ip(self):
'''
diff --git a/tests/functional/glusterd/test_probe_glusterd_down.py b/tests/functional/glusterd/test_probe_glusterd_down.py
new file mode 100644
index 000000000..c851bf104
--- /dev/null
+++ b/tests/functional/glusterd/test_probe_glusterd_down.py
@@ -0,0 +1,140 @@
+# Copyright (C) 2020-2021 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.peer_ops import peer_probe
+from glustolibs.gluster.lib_utils import is_core_file_created
+from glustolibs.gluster.peer_ops import peer_detach, is_peer_connected
+from glustolibs.gluster.gluster_init import stop_glusterd, start_glusterd
+from glustolibs.misc.misc_libs import bring_down_network_interface
+
+
+class PeerProbeWhenGlusterdDown(GlusterBaseClass):
+
+ def test_peer_probe_when_glusterd_down(self):
+ # pylint: disable=too-many-statements
+ '''
+ Test script to verify the behavior when we try to peer
+ probe a valid node whose glusterd is down.
+ Also post-validate to make sure no core files are created
+ under "/", /var/log/core and /tmp directories.
+
+ Ref: BZ#1257394 Provide meaningful error on peer probe and peer detach
+ Test Steps:
+ 1 check the current peer status
+ 2 detach one of the valid nodes which is already part of cluster
+ 3 stop glusterd on that node
+ 4 try to attach above node to cluster, which must fail with
+ Transport endpoint error
+ 5 Recheck the test using hostname, expected to see same result
+ 6 start glusterd on that node
+ 7 halt/reboot the node
+ 8 try to peer probe the halted node, which must fail again.
+ 9 The only error accepted is
+ "peer probe: failed: Probe returned with Transport endpoint is not
+ connected"
+ 10 Check peer status and make sure no other nodes in peer reject state
+ '''
+
+ ret, test_timestamp, _ = g.run_local('date +%s')
+ test_timestamp = test_timestamp.strip()
+
+ # Detach one of the nodes which is part of the cluster
+ g.log.info("detaching server %s ", self.servers[1])
+ ret, _, err = peer_detach(self.mnode, self.servers[1])
+ msg = 'peer detach: failed: %s is not part of cluster\n' \
+ % self.servers[1]
+ if ret:
+ self.assertEqual(err, msg, "Failed to detach %s "
+ % (self.servers[1]))
+
+ # Bring down glusterd of the server which has been detached
+ g.log.info("Stopping glusterd on %s ", self.servers[1])
+ ret = stop_glusterd(self.servers[1])
+ self.assertTrue(ret, "Fail to stop glusterd on %s " % self.servers[1])
+
+ # Trying to peer probe the node whose glusterd was stopped using IP
+ g.log.info("Peer probing %s when glusterd down ", self.servers[1])
+ ret, _, err = peer_probe(self.mnode, self.servers[1])
+ self.assertNotEqual(ret, 0, "Peer probe should not pass when "
+ "glusterd is down")
+ self.assertEqual(err, "peer probe: failed: Probe returned with "
+ "Transport endpoint is not connected\n")
+
+ # Trying to peer probe the same node with hostname
+ g.log.info("Peer probing node %s using hostname with glusterd down ",
+ self.servers[1])
+ hostname = g.run(self.servers[1], "hostname")
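+ # g.run returns a (retcode, stdout, stderr) tuple, so hostname[1]
+ # below holds the command's stdout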
+ ret, _, err = peer_probe(self.mnode, hostname[1].strip())
+ self.assertNotEqual(ret, 0, "Peer probe should not pass when "
+ "glusterd is down")
+ self.assertEqual(err, "peer probe: failed: Probe returned with"
+ " Transport endpoint is not connected\n")
+
+ # Start glusterd again for the next set of test steps
+ g.log.info("starting glusterd on %s ", self.servers[1])
+ ret = start_glusterd(self.servers[1])
+ self.assertTrue(ret, "glusterd couldn't start successfully on %s"
+ % self.servers[1])
+
+ # Bring down the network for some time
+ network_status = bring_down_network_interface(self.servers[1], 150)
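+ # bring_down_network_interface runs asynchronously; the interface is
+ # expected back after the 150 second window, and the async proc is
+ # reaped later with async_communicate()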
+
+ # Peer probing the node using IP when it is still not online
+ g.log.info("Peer probing node %s when network is down",
+ self.servers[1])
+ ret, _, err = peer_probe(self.mnode, self.servers[1])
+ self.assertNotEqual(ret, 0, "Peer probe passed when it was expected to"
+ " fail")
+ self.assertEqual(err.split("\n")[0], "peer probe: failed: Probe "
+ "returned with Transport endpoint"
+ " is not connected")
+
+ # Peer probing the node using hostname when it is still not online
+ g.log.info("Peer probing node %s using hostname which is still "
+ "not online ",
+ self.servers[1])
+ ret, _, err = peer_probe(self.mnode, hostname[1].strip())
+ self.assertNotEqual(ret, 0, "Peer probe should not pass when node "
+ "has not come online")
+ self.assertEqual(err.split("\n")[0], "peer probe: failed: Probe "
+ "returned with Transport endpoint"
+ " is not connected")
+
+ ret, _, _ = network_status.async_communicate()
+ if ret != 0:
+ g.log.error("Failed to perform network interface ops")
+
+ # Peer probe the node must pass
+ g.log.info("peer probing node %s", self.servers[1])
+ ret, _, err = peer_probe(self.mnode, self.servers[1])
+ self.assertEqual(ret, 0, "Peer probe has failed unexpectedly with "
+ "%s " % err)
+
+ # Checking if core file created in "/", "/tmp" and "/var/log/core"
+ ret = is_core_file_created(self.servers, test_timestamp)
+ self.assertTrue(ret, "core file found")
+
+ def tearDown(self):
+ g.log.info("Peering any nodes which are not part of cluster as "
+ "part of cleanup")
+ for server in self.servers:
+ if not is_peer_connected(self.mnode, server):
+ ret, _, err = peer_probe(self.mnode, server)
+ if ret:
+ raise ExecutionError("Peer probe failed with %s " % err)
diff --git a/tests/functional/glusterd/test_probe_hostname.py b/tests/functional/glusterd/test_probe_hostname.py
index fde3557bb..12a30ff67 100644
--- a/tests/functional/glusterd/test_probe_hostname.py
+++ b/tests/functional/glusterd/test_probe_hostname.py
@@ -39,7 +39,7 @@ class TestPeerProbe(GlusterBaseClass):
raise ExecutionError("Peer detach failed")
g.log.info("Peer detach SUCCESSFUL.")
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
def tearDown(self):
@@ -64,7 +64,7 @@ class TestPeerProbe(GlusterBaseClass):
"servers %s", self.servers)
# Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_peer_probe_validation(self):
# pylint: disable=too-many-statements
diff --git a/tests/functional/glusterd/test_profile_info_without_having_profile_started.py b/tests/functional/glusterd/test_profile_info_without_having_profile_started.py
new file mode 100644
index 000000000..e2403a93b
--- /dev/null
+++ b/tests/functional/glusterd/test_profile_info_without_having_profile_started.py
@@ -0,0 +1,188 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+ Test Description:
+ Tests to check profile info without starting profile
+"""
+
+from glusto.core import Glusto as g
+
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.profile_ops import (profile_start, profile_info)
+from glustolibs.misc.misc_libs import upload_scripts
+from glustolibs.io.utils import validate_io_procs
+from glustolibs.gluster.brick_libs import get_all_bricks
+from glustolibs.gluster.lib_utils import is_core_file_created
+from glustolibs.gluster.gluster_init import is_glusterd_running
+from glustolibs.gluster.volume_ops import get_volume_list
+from glustolibs.gluster.volume_libs import (cleanup_volume, setup_volume)
+
+
+@runs_on([['distributed', 'replicated', 'distributed-replicated',
+ 'dispersed', 'distributed-dispersed', 'arbiter',
+ 'distributed-arbiter'], ['glusterfs']])
+class TestProfileInfoWithoutHavingProfileStarted(GlusterBaseClass):
+
+ @classmethod
+ def setUpClass(cls):
+ cls.get_super_method(cls, 'setUpClass')()
+
+ # Uploading file_dir script in all client directories
+ g.log.info("Upload io scripts to clients %s for running IO on "
+ "mounts", cls.clients)
+ cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
+ "file_dir_ops.py")
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
+ if not ret:
+ raise ExecutionError("Failed to upload IO scripts to clients %s"
+ % cls.clients)
+ g.log.info("Successfully uploaded IO scripts to clients %s",
+ cls.clients)
+
+ def setUp(self):
+ self.get_super_method(self, 'setUp')()
+ # Creating Volume and mounting volume.
+ ret = self.setup_volume_and_mount_volume(self.mounts)
+ if not ret:
+ raise ExecutionError("Volume creation or mount failed: %s"
+ % self.volname)
+ g.log.info("Volme created and mounted successfully : %s",
+ self.volname)
+
+ def tearDown(self):
+ # Unmounting and cleaning volume.
+ ret = self.unmount_volume_and_cleanup_volume(self.mounts)
+ if not ret:
+ raise ExecutionError("Unable to delete volume % s" % self.volname)
+ g.log.info("Volume deleted successfully : %s", self.volname)
+
+ # clean up all volumes
+ vol_list = get_volume_list(self.mnode)
+ if not vol_list:
+ raise ExecutionError("Failed to get the volume list")
+ for volume in vol_list:
+ ret = cleanup_volume(self.mnode, volume)
+ if not ret:
+ raise ExecutionError("Unable to delete volume % s" % volume)
+ g.log.info("Volume deleted successfully : %s", volume)
+
+ self.get_super_method(self, 'tearDown')()
+
+ def test_profile_operations(self):
+ """
+ Test Case:
+ 1) Create a volume and start it.
+ 2) Mount volume on client and start IO.
+ 3) Start profile on the volume.
+ 4) Run profile info and see if all bricks are present or not.
+ 5) Create another volume.
+ 6) Run profile info without starting the profile.
+ 7) Run profile info with all possible options without starting
+ the profile.
+ """
+ # Timestamp of the current test case start time
+ ret, test_timestamp, _ = g.run_local('date +%s')
+ test_timestamp = test_timestamp.strip()
+
+ # Start IO on mount points.
+ g.log.info("Starting IO on all mounts...")
+ self.all_mounts_procs = []
+ counter = 1
+ for mount_obj in self.mounts:
+ g.log.info("Starting IO on %s:%s", mount_obj.client_system,
+ mount_obj.mountpoint)
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
+ "--dir-depth 4 "
+ "--dir-length 6 "
+ "--dirname-start-num %d "
+ "--max-num-of-dirs 3 "
+ "--num-of-files 5 %s" % (
+ self.script_upload_path,
+ counter, mount_obj.mountpoint))
+ proc = g.run_async(mount_obj.client_system, cmd,
+ user=mount_obj.user)
+ self.all_mounts_procs.append(proc)
+ counter += 1
+
+ # Start profile on volume.
+ ret, _, _ = profile_start(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "Failed to start profile on volume: %s"
+ % self.volname)
+ g.log.info("Successfully started profile on volume: %s",
+ self.volname)
+
+ # Getting and checking output of profile info.
+ ret, out, _ = profile_info(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "Failed to run profile info on volume: %s"
+ % self.volname)
+ g.log.info("Successfully executed profile info on volume: %s",
+ self.volname)
+
+ # Checking if all bricks are present in profile info.
+ brick_list = get_all_bricks(self.mnode, self.volname)
+ for brick in brick_list:
+ self.assertTrue(brick in out,
+ "Brick %s not a part of profile info output."
+ % brick)
+ g.log.info("Brick %s showing in profile info output.",
+ brick)
+
+ # Validate IO
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
+ g.log.info("IO validation complete.")
+
+ # Create and start a volume
+ self.volume['name'] = "volume_2"
+ self.volname = "volume_2"
+ ret = setup_volume(self.mnode, self.all_servers_info, self.volume)
+ self.assertTrue(ret, "Failed to create and start volume")
+ g.log.info("Successfully created and started volume_2")
+
+ # Check profile info on volume without starting profile
+ ret, _, _ = profile_info(self.mnode, self.volname)
+ self.assertNotEqual(ret, 0, "Unexpected:Successfully ran profile info"
+ " on volume: %s" % self.volname)
+ g.log.info("Expected: Failed to run pofile info on volume: %s",
+ self.volname)
+
+ # Running profile info with different profile options.
+ profile_options = ('peek', 'incremental', 'clear',
+ 'incremental peek', 'cumulative')
+ for option in profile_options:
+ # Getting and checking output of profile info.
+ ret, _, _ = profile_info(self.mnode, self.volname,
+ options=option)
+ self.assertNotEqual(ret, 0,
+ "Unexpected: Successfully ran profile info"
+ " %s on volume: %s" % (option, self.volname))
+ g.log.info("Expected: Failed to execute profile info %s on"
+ " volume: %s", option, self.volname)
+
+ # Checking for core files.
+ ret = is_core_file_created(self.servers, test_timestamp)
+ self.assertTrue(ret, "glusterd service should not crash")
+ g.log.info("No core file found, glusterd service running "
+ "successfully")
+
+ # Checking whether glusterd is running or not
+ ret = is_glusterd_running(self.servers)
+ self.assertEqual(ret, 0, "Glusterd has crashed on nodes.")
+ g.log.info("No glusterd crashes observed.")
diff --git a/tests/functional/glusterd/test_profile_operations.py b/tests/functional/glusterd/test_profile_operations.py
index ae0a968eb..9eb304a92 100644
--- a/tests/functional/glusterd/test_profile_operations.py
+++ b/tests/functional/glusterd/test_profile_operations.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2019 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2019-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -20,6 +20,7 @@
"""
from glusto.core import Glusto as g
+
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.profile_ops import (profile_start, profile_info,
@@ -40,16 +41,14 @@ class TestProfileOpeartions(GlusterBaseClass):
@classmethod
def setUpClass(cls):
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# Uploading file_dir script in all client direcotries
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts to clients %s"
% cls.clients)
@@ -57,8 +56,7 @@ class TestProfileOpeartions(GlusterBaseClass):
cls.clients)
def setUp(self):
-
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
# Creating Volume and mounting volume.
ret = self.setup_volume_and_mount_volume(self.mounts)
if not ret:
@@ -85,7 +83,7 @@ class TestProfileOpeartions(GlusterBaseClass):
raise ExecutionError("Unable to delete volume % s" % volume)
g.log.info("Volume deleted successfully : %s", volume)
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_profile_operations(self):
@@ -113,14 +111,14 @@ class TestProfileOpeartions(GlusterBaseClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
"--dir-depth 4 "
"--dir-length 6 "
"--dirname-start-num %d "
"--max-num-of-dirs 3 "
- "--num-of-files 5 %s"
- % (self.script_upload_path, counter,
- mount_obj.mountpoint))
+ "--num-of-files 5 %s" % (
+ self.script_upload_path,
+ counter, mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
self.all_mounts_procs.append(proc)
diff --git a/tests/functional/glusterd/test_profile_operations_with_one_node_down.py b/tests/functional/glusterd/test_profile_operations_with_one_node_down.py
index 762d7cea5..ad745cdf7 100644
--- a/tests/functional/glusterd/test_profile_operations_with_one_node_down.py
+++ b/tests/functional/glusterd/test_profile_operations_with_one_node_down.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2019 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2019-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -19,9 +19,10 @@
Tests to check basic profile operations with one node down.
"""
-from time import sleep
from random import randint
+
from glusto.core import Glusto as g
+
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.profile_ops import (profile_start, profile_info,
@@ -30,8 +31,8 @@ from glustolibs.misc.misc_libs import upload_scripts
from glustolibs.io.utils import validate_io_procs
from glustolibs.gluster.brick_libs import get_online_bricks_list
from glustolibs.gluster.gluster_init import (stop_glusterd, start_glusterd,
- is_glusterd_running)
-from glustolibs.gluster.peer_ops import is_peer_connected
+ wait_for_glusterd_to_start)
+from glustolibs.gluster.peer_ops import wait_for_peers_to_connect
@runs_on([['distributed-replicated', 'dispersed', 'distributed-dispersed'],
@@ -40,16 +41,14 @@ class TestProfileOpeartionsWithOneNodeDown(GlusterBaseClass):
@classmethod
def setUpClass(cls):
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# Uploading file_dir script in all client direcotries
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts to clients %s"
% cls.clients)
@@ -58,7 +57,7 @@ class TestProfileOpeartionsWithOneNodeDown(GlusterBaseClass):
def setUp(self):
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
# Creating Volume and mounting volume.
ret = self.setup_volume_and_mount_volume(self.mounts)
if not ret:
@@ -76,16 +75,11 @@ class TestProfileOpeartionsWithOneNodeDown(GlusterBaseClass):
g.log.info("Successfully started glusterd.")
# Checking if peer is connected
- counter = 0
- while counter < 30:
- ret = is_peer_connected(self.mnode, self.servers)
- counter += 1
- if ret:
- break
- sleep(3)
- if not ret:
- ExecutionError("Peers are not in connected state.")
- g.log.info("Peers are in connected state.")
+ for server in self.servers:
+ ret = wait_for_peers_to_connect(self.mnode, server)
+ if not ret:
+ ExecutionError("Peers are not in connected state.")
+ g.log.info("Peers are in connected state.")
# Unmounting and cleaning volume.
ret = self.unmount_volume_and_cleanup_volume(self.mounts)
@@ -93,7 +87,7 @@ class TestProfileOpeartionsWithOneNodeDown(GlusterBaseClass):
raise ExecutionError("Unable to delete volume % s" % self.volname)
g.log.info("Volume deleted successfully : %s", self.volname)
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_profile_operations_with_one_node_down(self):
@@ -116,14 +110,14 @@ class TestProfileOpeartionsWithOneNodeDown(GlusterBaseClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
"--dir-depth 4 "
"--dirname-start-num %d "
"--dir-length 6 "
"--max-num-of-dirs 3 "
- "--num-of-files 5 %s"
- % (self.script_upload_path, counter,
- mount_obj.mountpoint))
+ "--num-of-files 5 %s" % (
+ self.script_upload_path,
+ counter, mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
self.all_mounts_procs.append(proc)
@@ -143,13 +137,11 @@ class TestProfileOpeartionsWithOneNodeDown(GlusterBaseClass):
ret = stop_glusterd(self.servers[self.random_server])
self.assertTrue(ret, "Failed to stop glusterd on one node.")
g.log.info("Successfully stopped glusterd on one node.")
- counter = 0
- while counter > 20:
- ret = is_glusterd_running(self.servers[self.random_server])
- if ret:
- break
- counter += 1
- sleep(3)
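+ # glusterd was stopped above, so this wait is expected to time out
+ # and return False; the assertFalse below treats that as success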
+ ret = wait_for_glusterd_to_start(self.servers[self.random_server])
+ self.assertFalse(ret, "glusterd is still running on %s"
+ % self.servers[self.random_server])
+ g.log.info("Glusterd stop on the nodes : %s "
+ "succeeded", self.servers[self.random_server])
# Getting and checking output of profile info.
ret, out, _ = profile_info(self.mnode, self.volname)
@@ -196,13 +188,7 @@ class TestProfileOpeartionsWithOneNodeDown(GlusterBaseClass):
g.log.info("Successfully started glusterd.")
# Checking if peer is connected
- counter = 0
- while counter < 30:
- ret = is_peer_connected(self.mnode, self.servers)
- counter += 1
- if ret:
- break
- sleep(3)
+ ret = wait_for_peers_to_connect(self.mnode, self.servers)
self.assertTrue(ret, "Peers are not in connected state.")
g.log.info("Peers are in connected state.")
diff --git a/tests/functional/glusterd/test_profile_simultaneously_on_different_nodes.py b/tests/functional/glusterd/test_profile_simultaneously_on_different_nodes.py
new file mode 100644
index 000000000..33d74daf7
--- /dev/null
+++ b/tests/functional/glusterd/test_profile_simultaneously_on_different_nodes.py
@@ -0,0 +1,185 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+ Test Description:
+ Tests to run profile operations simultaneously on different nodes.
+"""
+
+from glusto.core import Glusto as g
+
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.profile_ops import profile_start
+from glustolibs.misc.misc_libs import upload_scripts
+from glustolibs.io.utils import validate_io_procs
+from glustolibs.gluster.lib_utils import is_core_file_created
+from glustolibs.gluster.gluster_init import is_glusterd_running
+from glustolibs.gluster.volume_ops import get_volume_list
+from glustolibs.gluster.volume_libs import (cleanup_volume, setup_volume)
+
+
+@runs_on([['distributed', 'replicated', 'distributed-replicated',
+ 'dispersed', 'distributed-dispersed'], ['glusterfs']])
+class TestProfileSimultaneouslyOnDifferentNodes(GlusterBaseClass):
+
+ @classmethod
+ def setUpClass(cls):
+ cls.get_super_method(cls, 'setUpClass')()
+
+ # Uploading file_dir script in all client directories
+ cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
+ "file_dir_ops.py")
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
+ if not ret:
+ raise ExecutionError("Failed to upload IO scripts to clients %s"
+ % cls.clients)
+ g.log.info("Successfully uploaded IO scripts to clients %s",
+ cls.clients)
+
+ def setUp(self):
+ self.get_super_method(self, 'setUp')()
+ # Creating Volume and mounting volume.
+ ret = self.setup_volume_and_mount_volume(self.mounts)
+ if not ret:
+ raise ExecutionError("Volume creation or mount failed: %s"
+ % self.volname)
+ g.log.info("Volume created and mounted successfully : %s",
+ self.volname)
+
+ def tearDown(self):
+ # Unmounting and cleaning volume.
+ ret = self.unmount_volume_and_cleanup_volume(self.mounts)
+ if not ret:
+ raise ExecutionError("Unable to delete volume % s" % self.volname)
+ g.log.info("Volume deleted successfully : %s", self.volname)
+
+ # clean up all volumes
+ vol_list = get_volume_list(self.mnode)
+ if not vol_list:
+ raise ExecutionError("Failed to get the volume list")
+ for volume in vol_list:
+ ret = cleanup_volume(self.mnode, volume)
+ if not ret:
+ raise ExecutionError("Unable to delete volume % s" % volume)
+ g.log.info("Volume deleted successfully : %s", volume)
+
+ self.get_super_method(self, 'tearDown')()
+
+ def test_profile_simultaneously_on_different_nodes(self):
+ """
+ Test Case:
+ 1) Create a volume and start it.
+ 2) Mount volume on client and start IO.
+ 3) Start profile on the volume.
+ 4) Create another volume.
+ 5) Start profile on the volume.
+ 6) Run volume status in a loop on one of the nodes.
+ 7) Run profile info for the new volume on another node.
+ 8) Run profile info for the new volume in a loop 100 times on
+ a third node.
+ """
+ # Timestamp of the current test case start time
+ ret, test_timestamp, _ = g.run_local('date +%s')
+ test_timestamp = test_timestamp.strip()
+
+ # Start IO on mount points.
+ self.all_mounts_procs = []
+ counter = 1
+ for mount_obj in self.mounts:
+ g.log.info("Starting IO on %s:%s", mount_obj.client_system,
+ mount_obj.mountpoint)
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
+ "--dir-depth 4 "
+ "--dir-length 6 "
+ "--dirname-start-num %d "
+ "--max-num-of-dirs 3 "
+ "--num-of-files 5 %s" % (
+ self.script_upload_path,
+ counter, mount_obj.mountpoint))
+ proc = g.run_async(mount_obj.client_system, cmd,
+ user=mount_obj.user)
+ self.all_mounts_procs.append(proc)
+ counter += 1
+
+ # Start profile on volume.
+ ret, _, _ = profile_start(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "Failed to start profile on volume: %s"
+ % self.volname)
+ g.log.info("Successfully started profile on volume: %s",
+ self.volname)
+
+ # Validate IO
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
+ g.log.info("IO validation complete.")
+
+ # Create and start a volume
+ self.volume['name'] = "volume_2"
+ self.volname = "volume_2"
+ ret = setup_volume(self.mnode, self.all_servers_info, self.volume)
+ self.assertTrue(ret, "Failed to create and start volume")
+ g.log.info("Successfully created and started volume_2")
+
+ # Start profile on volume.
+ ret, _, _ = profile_start(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "Failed to start profile on volume: %s"
+ % self.volname)
+ g.log.info("Successfully started profile on volume: %s",
+ self.volname)
+
+ # Run volume status in a loop on one of the nodes
+ cmd = "for i in `seq 1 100`;do gluster v status;done"
+ proc1 = g.run_async(self.servers[1], cmd)
+
+ # Check profile info on another node
+ cmd = "gluster v profile %s info" % self.volname
+ ret, _, _ = g.run(self.mnode, cmd)
+ self.assertEqual(ret, 0, "Failed to run profile info on volume: %s"
+ " on node %s" % (self.volname, self.mnode))
+ g.log.info("Successfully run pofile info on volume: %s on node %s",
+ self.volname, self.mnode)
+
+ # Run volume profile info in a loop on another node
+ cmd = """for i in `seq 1 100`;do gluster v profile %s info;
+ done""" % self.volname
+ proc2 = g.run_async(self.servers[3], cmd)
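+ # Both loops now run concurrently on different nodes; reaping the
+ # async procs below verifies neither command stream failed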
+
+ ret1, _, _ = proc1.async_communicate()
+ ret2, _, _ = proc2.async_communicate()
+
+ self.assertEqual(ret1, 0, "Failed to run volume status in a loop"
+ " on node %s" % self.servers[1])
+ g.log.info("Successfully running volume status in a loop on node"
+ " %s", self.servers[1])
+
+ self.assertEqual(ret2, 0, "Failed to run profile info in a loop"
+ " on node %s" % self.servers[3])
+ g.log.info("Successfully running volume status in a loop on node"
+ " %s", self.servers[3])
+
+ # Checking for core files.
+ ret = is_core_file_created(self.servers, test_timestamp)
+ self.assertTrue(ret, "glusterd service should not crash")
+ g.log.info("No core file found, glusterd service running "
+ "successfully")
+
+ # Checking whether glusterd is running or not
+ ret = is_glusterd_running(self.servers)
+ self.assertEqual(ret, 0, "Glusterd has crashed on nodes.")
+ g.log.info("No glusterd crashes observed.")
diff --git a/tests/functional/glusterd/test_profile_start_with_quorum_not_met.py b/tests/functional/glusterd/test_profile_start_with_quorum_not_met.py
index 7fffac89b..2dd2e2f26 100644
--- a/tests/functional/glusterd/test_profile_start_with_quorum_not_met.py
+++ b/tests/functional/glusterd/test_profile_start_with_quorum_not_met.py
@@ -31,7 +31,7 @@ class TestProfileStartWithQuorumNotMet(GlusterBaseClass):
def setUp(self):
# calling GlusterBaseClass setUp
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
# Creating Volume
g.log.info("Started creating volume")
@@ -42,16 +42,6 @@ class TestProfileStartWithQuorumNotMet(GlusterBaseClass):
def tearDown(self):
- # Setting Quorum ratio to 51%
- self.quorum_perecent = {'cluster.server-quorum-ratio': '51%'}
- ret = set_volume_options(self.mnode, 'all', self.quorum_perecent)
- if not ret:
- raise ExecutionError("gluster volume set all cluster."
- "server-quorum-ratio percentage "
- "Failed :%s" % self.servers)
- g.log.info("gluster volume set all cluster.server-quorum-ratio 51 "
- "percentage enabled successfully on :%s", self.servers)
-
# stopping the volume and Cleaning up the volume
ret = self.cleanup_volume()
if not ret:
@@ -59,7 +49,7 @@ class TestProfileStartWithQuorumNotMet(GlusterBaseClass):
g.log.info("Volume deleted successfully : %s", self.volname)
# Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_profile_start_with_quorum_not_met(self):
# pylint: disable=too-many-statements
diff --git a/tests/functional/glusterd/test_quorum_remove_brick.py b/tests/functional/glusterd/test_quorum_remove_brick.py
index e4caa065f..b936e4c48 100644
--- a/tests/functional/glusterd/test_quorum_remove_brick.py
+++ b/tests/functional/glusterd/test_quorum_remove_brick.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2018-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -26,6 +26,7 @@ from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.volume_ops import set_volume_options
from glustolibs.gluster.gluster_init import (stop_glusterd, start_glusterd,
is_glusterd_running)
+from glustolibs.gluster.peer_ops import peer_probe_servers, is_peer_connected
from glustolibs.gluster.brick_libs import get_all_bricks
from glustolibs.gluster.brick_ops import remove_brick
from glustolibs.gluster.volume_libs import form_bricks_list_to_remove_brick
@@ -39,7 +40,7 @@ class TestServerQuorumNotMet(GlusterBaseClass):
setUp method for every test
"""
# calling GlusterBaseClass setUp
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
# Creating Volume
ret = self.setup_volume()
@@ -56,30 +57,19 @@ class TestServerQuorumNotMet(GlusterBaseClass):
if not ret:
raise ExecutionError("Failed to start glusterd on %s"
% self.random_server)
+ # Give glusterd 5 seconds to restart and reach peer connected state
+ sleep(5)
g.log.info("Glusterd started successfully on %s", self.random_server)
# checking for peer status from every node
- count = 0
- while count < 80:
- ret = self.validate_peers_are_connected()
- if ret:
- break
- sleep(2)
- count += 1
-
+ ret = is_peer_connected(self.mnode, self.servers)
if not ret:
- raise ExecutionError("Servers are not in peer probed state")
+ ret = peer_probe_servers(self.mnode, self.random_server)
+ if not ret:
+ raise ExecutionError("Failed to peer probe failed in "
+ "servers %s" % self.random_server)
g.log.info("All peers are in connected state")
- # Setting server-quorum-ratio to 51%
- ret = set_volume_options(self.mnode, 'all',
- {'cluster.server-quorum-ratio': '51%'})
- if not ret:
- raise ExecutionError("Failed to set server quorum ratio for %s"
- % self.servers)
- g.log.info("Able to set server quorum ratio successfully for %s",
- self.servers)
-
# stopping the volume and Cleaning up the volume
ret = self.cleanup_volume()
if not ret:
@@ -87,7 +77,7 @@ class TestServerQuorumNotMet(GlusterBaseClass):
% self.volname)
# Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_quorum_remove_brick(self):
'''
diff --git a/tests/functional/glusterd/test_quorum_syslog.py b/tests/functional/glusterd/test_quorum_syslog.py
index 4e66afd0f..a956e10af 100644
--- a/tests/functional/glusterd/test_quorum_syslog.py
+++ b/tests/functional/glusterd/test_quorum_syslog.py
@@ -35,7 +35,7 @@ class TestQuorumRelatedMessagesInSyslog(GlusterBaseClass):
"""
@classmethod
def setUpClass(cls):
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# checking for peer status from every node
ret = cls.validate_peers_are_connected()
@@ -47,7 +47,8 @@ class TestQuorumRelatedMessagesInSyslog(GlusterBaseClass):
setUp method for every test
"""
# calling GlusterBaseClass setUp
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
+
self.volume_list = []
# create a volume
ret = setup_volume(self.mnode, self.all_servers_info,
@@ -119,7 +120,7 @@ class TestQuorumRelatedMessagesInSyslog(GlusterBaseClass):
g.log.info("Volume deleted successfully : %s", volume)
# Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_quorum_messages_in_syslog_with_more_volumes(self):
"""
diff --git a/tests/functional/glusterd/test_readonly_option_on_volume.py b/tests/functional/glusterd/test_readonly_option_on_volume.py
index 064d1ac8a..d4250b164 100644
--- a/tests/functional/glusterd/test_readonly_option_on_volume.py
+++ b/tests/functional/glusterd/test_readonly_option_on_volume.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2018-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -19,6 +19,7 @@
"""
from glusto.core import Glusto as g
+
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.misc.misc_libs import upload_scripts
@@ -32,16 +33,14 @@ class TestReadOnlyOptionOnVolume(GlusterBaseClass):
@classmethod
def setUpClass(cls):
cls.counter = 1
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# Uploading file_dir script in all client direcotries
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts to clients %s"
% cls.clients)
@@ -53,7 +52,7 @@ class TestReadOnlyOptionOnVolume(GlusterBaseClass):
setUp method for every test
"""
# calling GlusterBaseClass setUp
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
# Creating Volume
ret = self.setup_volume_and_mount_volume(self.mounts)
@@ -99,7 +98,7 @@ class TestReadOnlyOptionOnVolume(GlusterBaseClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 2 "
"--dir-length 2 "
@@ -136,7 +135,7 @@ class TestReadOnlyOptionOnVolume(GlusterBaseClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 2 "
"--dir-length 2 "
diff --git a/tests/functional/glusterd/test_rebalance_hang.py b/tests/functional/glusterd/test_rebalance_hang.py
index a826703c1..90b31d222 100644
--- a/tests/functional/glusterd/test_rebalance_hang.py
+++ b/tests/functional/glusterd/test_rebalance_hang.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2018-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -14,12 +14,12 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-from time import sleep
from glusto.core import Glusto as g
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.volume_ops import (volume_create, volume_start,
get_volume_list, get_volume_status)
-from glustolibs.gluster.brick_libs import get_all_bricks
+from glustolibs.gluster.brick_libs import (
+ get_all_bricks, wait_for_bricks_to_be_online)
from glustolibs.gluster.volume_libs import (cleanup_volume)
from glustolibs.gluster.peer_ops import (peer_probe, peer_detach,
peer_probe_servers,
@@ -32,7 +32,8 @@ from glustolibs.gluster.rebalance_ops import (rebalance_start,
from glustolibs.gluster.mount_ops import mount_volume, umount_volume
from glustolibs.io.utils import validate_io_procs
from glustolibs.gluster.gluster_init import (start_glusterd, stop_glusterd,
- is_glusterd_running)
+ is_glusterd_running,
+ wait_for_glusterd_to_start)
@runs_on([['distributed'], ['glusterfs']])
@@ -46,7 +47,7 @@ class TestRebalanceHang(GlusterBaseClass):
raise ExecutionError("Failed to detach servers %s"
% self.servers)
g.log.info("Peer detach SUCCESSFUL.")
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
def tearDown(self):
@@ -61,6 +62,11 @@ class TestRebalanceHang(GlusterBaseClass):
vol_list = get_volume_list(self.mnode)
if vol_list is not None:
for volume in vol_list:
+ # check all bricks are online
+ ret = wait_for_bricks_to_be_online(self.mnode, volume)
+ if not ret:
+ raise ExecutionError("Failed to bring bricks online"
+ "for volume %s" % volume)
ret = cleanup_volume(self.mnode, volume)
if not ret:
raise ExecutionError("Failed to cleanup volume")
@@ -76,7 +82,7 @@ class TestRebalanceHang(GlusterBaseClass):
"servers %s" % self.servers)
g.log.info("Peer probe success for detached "
"servers %s", self.servers)
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_rebalance_hang(self):
"""
@@ -141,16 +147,14 @@ class TestRebalanceHang(GlusterBaseClass):
"do mkdir l1_dir.$i/l2_dir.$j ; "
"for k in `seq 1 10` ; "
"do dd if=/dev/urandom of=l1_dir.$i/l2_dir.$j/test.$k "
- "bs=128k count=$k ; "
- "done ; "
- "done ; "
- "done ; "
+ "bs=128k count=$k ; done ; done ; done ; "
% (self.mounts[0].mountpoint))
proc = g.run_async(self.mounts[0].client_system, command,
user=self.mounts[0].user)
self.all_mounts_procs.append(proc)
self.io_validation_complete = False
+
# Validate IO
ret = validate_io_procs(self.all_mounts_procs, self.mounts)
self.io_validation_complete = True
@@ -186,14 +190,8 @@ class TestRebalanceHang(GlusterBaseClass):
# Start glusterd on the node where it is stopped
ret = start_glusterd(self.servers[1])
self.assertTrue(ret, "glusterd start on the node failed")
- count = 0
- while count < 60:
- ret = is_glusterd_running(self.servers[1])
- if not ret:
- break
- sleep(2)
- count += 1
- self.assertEqual(ret, 0, "glusterd is not running on %s"
- % self.servers[1])
+ ret = wait_for_glusterd_to_start(self.servers[1])
+ self.assertTrue(ret, "glusterd is not running on %s"
+ % self.servers[1])
g.log.info("Glusterd start on the nodes : %s "
"succeeded", self.servers[1])
diff --git a/tests/functional/glusterd/test_rebalance_new_node.py b/tests/functional/glusterd/test_rebalance_new_node.py
index a9cd0fea6..6c5777543 100644
--- a/tests/functional/glusterd/test_rebalance_new_node.py
+++ b/tests/functional/glusterd/test_rebalance_new_node.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2016-2017 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2016-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -15,6 +15,7 @@
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from glusto.core import Glusto as g
+
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.volume_libs import (setup_volume, cleanup_volume)
@@ -33,8 +34,7 @@ from glustolibs.gluster.mount_ops import is_mounted
class TestRebalanceStatus(GlusterBaseClass):
def setUp(self):
-
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
# check whether peers are in connected state
ret = self.validate_peers_are_connected()
@@ -51,11 +51,9 @@ class TestRebalanceStatus(GlusterBaseClass):
# Uploading file_dir script in all client direcotries
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", self.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
self.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(self.clients, script_local_path)
+ ret = upload_scripts(self.clients, self.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts to clients %s" %
self.clients)
@@ -67,6 +65,7 @@ class TestRebalanceStatus(GlusterBaseClass):
# unmount the volume
ret = self.unmount_volume(self.mounts)
self.assertTrue(ret, "Volume unmount failed for %s" % self.volname)
+ g.log.info("Volume unmounted successfully : %s", self.volname)
# get volumes list and clean up all the volumes
vol_list = get_volume_list(self.mnode)
@@ -87,13 +86,7 @@ class TestRebalanceStatus(GlusterBaseClass):
raise ExecutionError("Peer probe failed to all the servers from "
"the node.")
- GlusterBaseClass.tearDown.im_func(self)
-
- @classmethod
- def tearDownClass(cls):
-
- # Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDownClass.im_func(cls)
+ self.get_super_method(self, 'tearDown')()
def test_rebalance_status_from_newly_probed_node(self):
@@ -131,7 +124,7 @@ class TestRebalanceStatus(GlusterBaseClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 10 "
"--dir-length 5 "
diff --git a/tests/functional/glusterd/test_rebalance_spurious.py b/tests/functional/glusterd/test_rebalance_spurious.py
index c9dbed035..fa9f22084 100644
--- a/tests/functional/glusterd/test_rebalance_spurious.py
+++ b/tests/functional/glusterd/test_rebalance_spurious.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2018-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -25,12 +25,13 @@ from glustolibs.gluster.peer_ops import (peer_probe, peer_detach,
peer_probe_servers,
nodes_from_pool_list,
is_peer_connected)
-from glustolibs.gluster.lib_utils import form_bricks_list
+from glustolibs.gluster.lib_utils import (
+ form_bricks_list, get_servers_bricks_dict)
from glustolibs.gluster.brick_ops import remove_brick
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.rebalance_ops import (rebalance_start,
wait_for_fix_layout_to_complete)
-from glustolibs.gluster.glusterdir import mkdir
+from glustolibs.gluster.glusterdir import mkdir, get_dir_contents
from glustolibs.gluster.mount_ops import mount_volume, umount_volume
from glustolibs.gluster.glusterfile import get_fattr
@@ -46,7 +47,7 @@ class TestSpuriousRebalance(GlusterBaseClass):
if ret != 0:
raise ExecutionError("Peer detach failed")
g.log.info("Peer detach SUCCESSFUL.")
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
def tearDown(self):
@@ -76,7 +77,21 @@ class TestSpuriousRebalance(GlusterBaseClass):
"servers %s" % self.servers)
g.log.info("Peer probe success for detached "
"servers %s", self.servers)
- GlusterBaseClass.tearDown.im_func(self)
+
+ bricks = get_servers_bricks_dict(self.servers,
+ self.all_servers_info)
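+ # get_servers_bricks_dict is expected to map each server to its
+ # configured brick directories, which are then wiped below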
+
+ # Checking brick dir and cleaning it.
+ for server in self.servers:
+ for brick in bricks[server]:
+ if get_dir_contents(server, brick):
+ cmd = "rm -rf " + brick + "/*"
+ ret, _, _ = g.run(server, cmd)
+ if ret:
+ raise ExecutionError("Failed to delete the brick "
+ "dirs of deleted volume.")
+
+ self.get_super_method(self, 'tearDown')()
def test_spurious_rebalance(self):
"""
diff --git a/tests/functional/glusterd/test_rebalance_start_not_failed_with_socket_path_too_long.py b/tests/functional/glusterd/test_rebalance_start_not_failed_with_socket_path_too_long.py
new file mode 100644
index 000000000..87cab40d0
--- /dev/null
+++ b/tests/functional/glusterd/test_rebalance_start_not_failed_with_socket_path_too_long.py
@@ -0,0 +1,173 @@
+# Copyright (C) 2021 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+"""
+Description:
+ Test that rebalance starts successfully if the volume name is longer
+ than 108 chars
+"""
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.brick_ops import add_brick
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass
+from glustolibs.gluster.glusterdir import mkdir
+from glustolibs.gluster.lib_utils import form_bricks_list
+from glustolibs.gluster.mount_ops import umount_volume, mount_volume
+from glustolibs.gluster.rebalance_ops import (
+ rebalance_start,
+ wait_for_rebalance_to_complete
+)
+from glustolibs.gluster.volume_libs import (
+ volume_start,
+ cleanup_volume
+)
+from glustolibs.gluster.volume_ops import volume_create, get_volume_list
+from glustolibs.io.utils import run_linux_untar
+
+
+class TestLookupDir(GlusterBaseClass):
+ def tearDown(self):
+ cmd = ("sed -i '/transport.socket.bind-address/d'"
+ " /etc/glusterfs/glusterd.vol")
+ ret, _, _ = g.run(self.mnode, cmd)
+ if ret:
+ raise ExecutionError("Failed to remove entry from 'glusterd.vol'")
+ for mount_dir in self.mount:
+ ret, _, _ = umount_volume(self.clients[0], mount_dir)
+ if ret:
+ raise ExecutionError("Failed to unmount %s" % mount_dir)
+
+ vol_list = get_volume_list(self.mnode)
+ if vol_list is not None:
+ for volume in vol_list:
+ ret = cleanup_volume(self.mnode, volume)
+ if not ret:
+ raise ExecutionError("Failed to cleanup volume")
+ g.log.info("Volume deleted successfully : %s", volume)
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def test_rebalance_start_not_fail(self):
+ """
+ 1. On Node N1, Add "transport.socket.bind-address N1" in the
+ /etc/glusterfs/glusterd.vol
+ 2. Create a replicate (1X3) and a disperse (4+2) volume with
+ names longer than 108 chars
+ 3. Mount both volumes using node N1, where you added the
+ "transport.socket.bind-address", and start IO (like untar)
+ 4. Perform add-brick of 3 bricks on the replicate volume
+ 5. Start rebalance on the replicate volume
+ 6. Perform add-brick of 6 bricks on the disperse volume
+ 7. Start rebalance on the disperse volume
+ """
+ cmd = ("sed -i 's/end-volume/option "
+ "transport.socket.bind-address {}\\n&/g' "
+ "/etc/glusterfs/glusterd.vol".format(self.mnode))
+ disperse = ("disperse_e4upxjmtre7dl4797wedbp7r3jr8equzvmcae9f55t6z1"
+ "ffhrlk40jtnrzgo4n48fjf6b138cttozw3c6of3ze71n9urnjkshoi")
+ replicate = ("replicate_e4upxjmtre7dl4797wedbp7r3jr8equzvmcae9f55t6z1"
+ "ffhrlk40tnrzgo4n48fjf6b138cttozw3c6of3ze71n9urnjskahn")
+
+ volnames = (disperse, replicate)
+ for volume, vol_name in (
+ ("disperse", disperse), ("replicate", replicate)):
+
+ bricks_list = form_bricks_list(self.mnode, volume,
+ 6 if volume == "disperse" else 3,
+ self.servers,
+ self.all_servers_info)
+ if volume == "replicate":
+ ret, _, _ = volume_create(self.mnode, replicate,
+ bricks_list,
+ replica_count=3)
+
+ else:
+ ret, _, _ = volume_create(
+ self.mnode, disperse, bricks_list, force=True,
+ disperse_count=6, redundancy_count=2)
+
+ self.assertFalse(
+ ret,
+ "Unexpected: Volume create '{}' failed ".format(vol_name))
+ ret, _, _ = volume_start(self.mnode, vol_name)
+ self.assertFalse(ret, "Failed to start volume")
+
+ # Add entry in 'glusterd.vol'
+ ret, _, _ = g.run(self.mnode, cmd)
+ self.assertFalse(
+ ret, "Failed to add entry in 'glusterd.vol' file")
+
+ self.list_of_io_processes = []
+
+ # mount volume
+ self.mount = ("/mnt/replicated_mount", "/mnt/disperse_mount")
+ for mount_dir, volname in zip(self.mount, volnames):
+ ret, _, _ = mount_volume(
+ volname, "glusterfs", mount_dir, self.mnode,
+ self.clients[0])
+ self.assertFalse(
+ ret, "Failed to mount the volume '{}'".format(mount_dir))
+
+ # Run IO
+ # Create a dir to start untar
+ self.linux_untar_dir = "{}/{}".format(mount_dir, "linuxuntar")
+ ret = mkdir(self.clients[0], self.linux_untar_dir)
+ self.assertTrue(ret, "Failed to create dir linuxuntar for untar")
+
+ # Start linux untar on dir linuxuntar
+ ret = run_linux_untar(self.clients[:1], mount_dir,
+ dirs=tuple(['linuxuntar']))
+ self.list_of_io_processes += ret
+ self.is_io_running = True
+
+ # Add Brick to replicate Volume
+ bricks_list = form_bricks_list(
+ self.mnode, replicate, 3,
+ self.servers, self.all_servers_info, "replicate")
+ ret, _, _ = add_brick(
+ self.mnode, replicate, bricks_list, force=True)
+ self.assertFalse(ret, "Failed to add-brick '{}'".format(replicate))
+
+ # Trigger Rebalance on the volume
+ ret, _, _ = rebalance_start(self.mnode, replicate)
+ self.assertFalse(
+ ret, "Failed to start rebalance on the volume '{}'".format(
+ replicate))
+
+ # Add Brick to disperse Volume
+ bricks_list = form_bricks_list(
+ self.mnode, disperse, 6,
+ self.servers, self.all_servers_info, "disperse")
+
+ ret, _, _ = add_brick(
+ self.mnode, disperse, bricks_list, force=True)
+ self.assertFalse(ret, "Failed to add-brick '{}'".format(disperse))
+
+ # Trigger Rebalance on the volume
+ ret, _, _ = rebalance_start(self.mnode, disperse)
+ self.assertFalse(
+ ret,
+ "Failed to start rebalance on the volume {}".format(disperse))
+
+ # Check if Rebalance is completed on both the volume
+ for volume in (replicate, disperse):
+ ret = wait_for_rebalance_to_complete(
+ self.mnode, volume, timeout=600)
+ self.assertTrue(
+ ret, "Rebalance is not Compleated on Volume '{}'".format(
+ volume))
diff --git a/tests/functional/glusterd/test_rebalance_when_quorum_not_met.py b/tests/functional/glusterd/test_rebalance_when_quorum_not_met.py
new file mode 100644
index 000000000..dcc49936b
--- /dev/null
+++ b/tests/functional/glusterd/test_rebalance_when_quorum_not_met.py
@@ -0,0 +1,154 @@
+# Copyright (C) 2018-2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+""" Description:
+ Test rebalance operation when quorum not met
+"""
+import random
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.gluster_init import (stop_glusterd, start_glusterd,
+ is_glusterd_running)
+from glustolibs.gluster.peer_ops import wait_for_peers_to_connect
+from glustolibs.gluster.rebalance_ops import rebalance_start
+from glustolibs.gluster.volume_ops import (volume_status,
+ volume_stop, volume_start,
+ set_volume_options)
+
+
+@runs_on([['distributed', 'dispersed', 'distributed-dispersed'],
+ ['glusterfs']])
+class TestServerQuorumNotMet(GlusterBaseClass):
+ def setUp(self):
+ """
+ setUp method for every test
+ """
+ # calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+
+ # Creating Volume
+ ret = self.setup_volume()
+ if not ret:
+ raise ExecutionError("Volume creation failed: %s" % self.volname)
+
+ def tearDown(self):
+ """
+ tearDown for every test
+ """
+
+ ret = is_glusterd_running(self.random_server)
+ if ret:
+ ret = start_glusterd(self.random_server)
+ if not ret:
+ raise ExecutionError("Failed to start glusterd on %s"
+ % self.random_server)
+
+ # checking for peer status from every node
+ ret = wait_for_peers_to_connect(self.mnode, self.servers)
+        if not ret:
+            raise ExecutionError("Peers of %s are not in connected state"
+                                 % self.mnode)
+
+ # stopping the volume and Cleaning up the volume
+ ret = self.cleanup_volume()
+ if not ret:
+ raise ExecutionError("Failed Cleanup the Volume %s"
+ % self.volname)
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def test_rebalance_quorum(self):
+ '''
+        -> Create volume
+        -> Stop the volume
+        -> Enable server quorum
+        -> Start the volume
+        -> Set server quorum ratio to 95%
+        -> Stop glusterd on any one of the nodes
+        -> Perform rebalance operation
+        -> Check gluster volume status
+        -> Start glusterd
+ '''
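+        # Why stopping a single node breaks quorum here (illustrative
+        # note, not part of the original test): with the ratio at 95%,
+        # the fraction of reachable servers after one of n nodes goes
+        # down is (n - 1) / n, which stays below 0.95 for any cluster
+        # of 20 nodes or fewer.
+        for nodes in (3, 6, 20):
+            assert (nodes - 1) / float(nodes) < 0.95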
+ # Stop the Volume
+ ret, _, _ = volume_stop(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "Failed to stop the volume %s" % self.volname)
+ g.log.info("Volume stopped successfully %s", self.volname)
+
+ # Enabling server quorum
+ ret = set_volume_options(self.mnode, self.volname,
+ {'cluster.server-quorum-type': 'server'})
+ self.assertTrue(ret, "Failed to set quorum type for volume %s"
+ % self.volname)
+ g.log.info("Able to set quorum type successfully for %s", self.volname)
+
+ # Start the volume
+ ret, _, _ = volume_start(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "Failed to start the volume %s"
+ % self.volname)
+ g.log.info("Volume started successfully %s", self.volname)
+
+ # Setting Quorum ratio in percentage
+ ret = set_volume_options(self.mnode, 'all',
+ {'cluster.server-quorum-ratio': '95%'})
+ self.assertTrue(ret, "Failed to set server quorum ratio on %s"
+ % self.servers)
+ g.log.info("Able to set server quorum ratio successfully on %s",
+ self.servers)
+
+ # Stopping glusterd
+ self.random_server = random.choice(self.servers[1:])
+ ret = stop_glusterd(self.random_server)
+ self.assertTrue(ret, "Failed to stop glusterd on %s"
+ % self.random_server)
+ g.log.info("Glusterd stopped successfully on %s", self.random_server)
+
+ msg = ("volume rebalance: " + self.volname + ": failed: Quorum not "
+ "met. Volume operation "
+ "not allowed")
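+        # glusterd validates server quorum before allowing the operation,
+        # so no rebalance task should get registered for the volume; the
+        # volume status check below verifies that (explanatory note).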
+
+ # Start Rebalance
+        ret, _, err = rebalance_start(self.mnode, self.volname)
+        self.assertNotEqual(ret, 0, "Unexpected: Rebalance succeeded on %s"
+                            " even though server quorum is not met"
+                            % self.volname)
+        g.log.info("Expected: Rebalance on %s failed as quorum is not met",
+                   self.volname)
+
+ # Checking Rebalance failed message
+ self.assertIn(msg, err, "Error message is not correct for rebalance "
+ "operation when quorum not met")
+ g.log.info("Error message is correct for rebalance operation "
+ "when quorum not met")
+
+ # Volume Status
+ ret, out, _ = volume_status(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "Failed to get volume status for %s"
+ % self.volname)
+ g.log.info("Successful in getting volume status for %s", self.volname)
+
+ # Checking volume status message
+ self.assertNotIn('rebalance', out, "Unexpected: Found rebalance task "
+ "in vol status of %s"
+ % self.volname)
+ g.log.info("Expected: Not Found rebalance task in vol status of %s",
+ self.volname)
+
+ # Starting glusterd
+ ret = start_glusterd(self.random_server)
+ self.assertTrue(ret, "Failed to start glusterd on %s"
+ % self.random_server)
+        g.log.info("Glusterd started successfully on %s",
+                   self.random_server)
diff --git a/tests/functional/glusterd/test_remove_brick_after_restart_glusterd.py b/tests/functional/glusterd/test_remove_brick_after_restart_glusterd.py
index 3b81dc6bd..40012b009 100644
--- a/tests/functional/glusterd/test_remove_brick_after_restart_glusterd.py
+++ b/tests/functional/glusterd/test_remove_brick_after_restart_glusterd.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2018-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -44,7 +44,7 @@ class TestRemoveBrickAfterRestartGlusterd(GlusterBaseClass):
if ret != 0:
raise ExecutionError("Peer detach failed")
g.log.info("Peer detach SUCCESSFUL.")
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
def tearDown(self):
@@ -53,7 +53,6 @@ class TestRemoveBrickAfterRestartGlusterd(GlusterBaseClass):
ret = umount_volume(self.mounts[0].client_system,
self.mounts[0].mountpoint, mtype=self.mount_type)
self.assertTrue(ret, ("Failed to Unmount Volume %s" % self.volname))
- g.log.info("Successfully Unmounted Volume %s", self.volname)
# Clean up all volumes and peer probe to form cluster
vol_list = get_volume_list(self.mnode)
@@ -75,7 +74,7 @@ class TestRemoveBrickAfterRestartGlusterd(GlusterBaseClass):
"servers %s" % self.servers)
g.log.info("Peer probe success for detached "
"servers %s", self.servers)
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_remove_brick(self):
"""
@@ -165,10 +164,12 @@ class TestRemoveBrickAfterRestartGlusterd(GlusterBaseClass):
user=self.mounts[0].user)
self.all_mounts_procs.append(proc)
self.io_validation_complete = False
+
# Validate IO
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients")
self.io_validation_complete = True
- self.assertTrue(ret, "IO failed on some of the clients")
remove_brick_list = bricks_list[2:4]
ret, _, _ = remove_brick(self.mnode, self.volname, remove_brick_list,
diff --git a/tests/functional/glusterd/test_remove_brick_scenarios.py b/tests/functional/glusterd/test_remove_brick_scenarios.py
index 0dc006812..ee1d7052a 100644
--- a/tests/functional/glusterd/test_remove_brick_scenarios.py
+++ b/tests/functional/glusterd/test_remove_brick_scenarios.py
@@ -33,7 +33,7 @@ class TestRemoveBrickScenarios(GlusterBaseClass):
@classmethod
def setUpClass(cls):
# Calling GlusterBaseClass setUpClass
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# Override Volumes
cls.volume['voltype'] = {
@@ -44,7 +44,7 @@ class TestRemoveBrickScenarios(GlusterBaseClass):
def setUp(self):
# calling GlusterBaseClass setUp
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
# Creating Volume
ret = self.setup_volume_and_mount_volume(self.mounts)
@@ -62,7 +62,7 @@ class TestRemoveBrickScenarios(GlusterBaseClass):
raise ExecutionError("Unable to delete volume % s" % self.volname)
g.log.info("Volume deleted successfully : %s", self.volname)
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_remove_brick_scenarios(self):
# pylint: disable=too-many-statements
diff --git a/tests/functional/glusterd/test_remove_brick_when_quorum_not_met.py b/tests/functional/glusterd/test_remove_brick_when_quorum_not_met.py
index 463b1c4e7..b83ec78f5 100644
--- a/tests/functional/glusterd/test_remove_brick_when_quorum_not_met.py
+++ b/tests/functional/glusterd/test_remove_brick_when_quorum_not_met.py
@@ -30,8 +30,7 @@ from glustolibs.gluster.brick_ops import remove_brick
class TestRemoveBrickWhenQuorumNotMet(GlusterBaseClass):
def setUp(self):
-
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
# check whether peers are in connected state
ret = self.validate_peers_are_connected()
@@ -58,13 +57,6 @@ class TestRemoveBrickWhenQuorumNotMet(GlusterBaseClass):
if not ret:
raise ExecutionError("Servers are not in connected state")
- # Setting quorum ratio to 51%
- ret = set_volume_options(self.mnode, 'all',
- {'cluster.server-quorum-ratio': '51%'})
- if not ret:
- raise ExecutionError("Failed to set server quorum ratio on %s"
- % self.volname)
-
# stopping the volume and Cleaning up the volume
ret = self.cleanup_volume()
if not ret:
@@ -72,7 +64,7 @@ class TestRemoveBrickWhenQuorumNotMet(GlusterBaseClass):
% self.volname)
g.log.info("Volume deleted successfully : %s", self.volname)
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_remove_brick_when_quorum_not_met(self):
diff --git a/tests/functional/glusterd/test_replace_brick_quorum_not_met.py b/tests/functional/glusterd/test_replace_brick_quorum_not_met.py
index 4081c7d90..70c79f0e8 100644
--- a/tests/functional/glusterd/test_replace_brick_quorum_not_met.py
+++ b/tests/functional/glusterd/test_replace_brick_quorum_not_met.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2018-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -60,15 +60,6 @@ class TestReplaceBrickWhenQuorumNotMet(GlusterBaseClass):
if not ret:
raise ExecutionError("Servers are not in peer probed state")
- # Setting Quorum ratio to 51%
- ret = set_volume_options(self.mnode, 'all',
- {'cluster.server-quorum-ratio': '51%'})
- if not ret:
- raise ExecutionError("Failed to set server quorum ratio on %s"
- % self.servers)
- g.log.info("Able to set server quorum ratio successfully on %s",
- self.servers)
-
# stopping the volume and Cleaning up the volume
ret = self.cleanup_volume()
if not ret:
@@ -86,7 +77,7 @@ class TestReplaceBrickWhenQuorumNotMet(GlusterBaseClass):
"dir's of deleted volume")
# Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_replace_brick_quorum(self):
@@ -156,7 +147,7 @@ class TestReplaceBrickWhenQuorumNotMet(GlusterBaseClass):
# on one of the server, Its not possible to check the brick status
# immediately in volume status after glusterd stop
count = 0
- while count < 100:
+ while count < 200:
vol_status = get_volume_status(self.mnode, self.volname)
servers_count = len(vol_status[self.volname].keys())
if servers_count == 5:
@@ -204,7 +195,7 @@ class TestReplaceBrickWhenQuorumNotMet(GlusterBaseClass):
# on one of the servers, Its not possible to check the brick status
# immediately in volume status after glusterd start
count = 0
- while count < 100:
+ while count < 200:
vol_status = get_volume_status(self.mnode, self.volname)
servers_count = len(vol_status[self.volname].keys())
if servers_count == 6:
@@ -214,7 +205,7 @@ class TestReplaceBrickWhenQuorumNotMet(GlusterBaseClass):
# Checking bricks are online or not
count = 0
- while count < 100:
+ while count < 200:
ret = are_bricks_online(self.mnode, self.volname,
self.brick_list[0:6])
if ret:
diff --git a/tests/functional/glusterd/test_reserve_limt_change_while_rebalance.py b/tests/functional/glusterd/test_reserve_limt_change_while_rebalance.py
new file mode 100644
index 000000000..2a7aacdac
--- /dev/null
+++ b/tests/functional/glusterd/test_reserve_limt_change_while_rebalance.py
@@ -0,0 +1,127 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.glusterdir import mkdir
+from glustolibs.gluster.rebalance_ops import (
+ rebalance_start,
+ rebalance_stop,
+ wait_for_rebalance_to_complete
+)
+from glustolibs.gluster.volume_libs import expand_volume
+from glustolibs.gluster.volume_ops import set_volume_options
+from glustolibs.io.utils import run_linux_untar
+
+
+@runs_on([['distributed-replicated'], ['glusterfs']])
+class TestReserveLimitChangeWhileRebalance(GlusterBaseClass):
+
+ def _set_vol_option(self, option):
+ """Method for setting volume option"""
+ ret = set_volume_options(
+ self.mnode, self.volname, option)
+ self.assertTrue(ret)
+
+ @classmethod
+ def setUpClass(cls):
+ # Calling GlusterBaseClass setUpClass
+ cls.get_super_method(cls, 'setUpClass')()
+
+ # Set I/O flag to false
+ cls.is_io_running = False
+
+ # Setup Volume and Mount Volume
+ ret = cls.setup_volume_and_mount_volume(mounts=cls.mounts)
+ if not ret:
+ raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
+
+ def tearDown(self):
+ if not wait_for_rebalance_to_complete(
+ self.mnode, self.volname, timeout=300):
+ raise ExecutionError(
+ "Failed to complete rebalance on volume '{}'".format(
+ self.volname))
+
+ # Unmounting and cleaning volume
+ ret = self.unmount_volume_and_cleanup_volume([self.mounts[0]])
+ if not ret:
+            raise ExecutionError("Unable to delete volume %s"
+                                 % self.volname)
+
+ self.get_super_method(self, 'tearDown')()
+
+ def test_reserve_limt_change_while_rebalance(self):
+ """
+        1. Create a distributed-replicated volume and start it
+        2. Enable the storage.reserve option on the volume using,
+           gluster volume set <volname> storage.reserve 50
+        3. Mount the volume on a client
+        4. Add some data on the mount point (within the reserve limit)
+        5. Add-brick and trigger rebalance; while rebalance is in
+           progress, change the reserve limit to a lower value, say 30
+        6. Stop the rebalance
+        7. Reset the storage reserve value to 50, as in step 2
+        8. Trigger rebalance again
+        9. While rebalance is in progress, change the reserve limit to a
+           higher value, say 70
+ """
+
+ # Setting storage.reserve 50
+ self._set_vol_option({"storage.reserve": "50"})
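+        # Note (descriptive addition): a bare number for storage.reserve
+        # is interpreted as a percentage of each brick, so "50" keeps
+        # half of every brick free; client writes that would cross the
+        # reserve limit fail with ENOSPC.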
+
+ self.list_of_io_processes = []
+ # Create a dir to start untar
+ self.linux_untar_dir = "{}/{}".format(self.mounts[0].mountpoint,
+ "linuxuntar")
+ ret = mkdir(self.clients[0], self.linux_untar_dir)
+ self.assertTrue(ret, "Failed to create dir linuxuntar for untar")
+
+ # Start linux untar on dir linuxuntar
+ ret = run_linux_untar(self.clients[0], self.mounts[0].mountpoint,
+ dirs=tuple(['linuxuntar']))
+ self.list_of_io_processes += ret
+ self.is_io_running = True
+
+ # Add bricks to the volume
+ ret = expand_volume(self.mnode, self.volname, self.servers,
+ self.all_servers_info)
+ self.assertTrue(ret, "Failed to add brick with rsync on volume %s"
+ % self.volname)
+
+ # Trigger rebalance on the volume
+ ret, _, _ = rebalance_start(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "Failed to start rebalance on the volume %s"
+ % self.volname)
+
+ # Setting storage.reserve 30
+ self._set_vol_option({"storage.reserve": "30"})
+
+ # Stopping Rebalance
+ ret, _, _ = rebalance_stop(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "Failed to stop rebalance on the volume %s"
+ % self.volname)
+
+        # Reset storage.reserve back to 50, as in step 2
+        self._set_vol_option({"storage.reserve": "50"})
+
+ # Trigger rebalance on the volume
+ ret, _, _ = rebalance_start(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "Failed to start rebalance on the volume %s"
+ % self.volname)
+
+ # Setting storage.reserve 70
+ self._set_vol_option({"storage.reserve": "70"})
diff --git a/tests/functional/glusterd/test_reserved_port_range_for_gluster.py b/tests/functional/glusterd/test_reserved_port_range_for_gluster.py
new file mode 100644
index 000000000..b03c74884
--- /dev/null
+++ b/tests/functional/glusterd/test_reserved_port_range_for_gluster.py
@@ -0,0 +1,152 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+""" Description:
+ Setting reserved port range for gluster
+"""
+
+from random import choice
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass
+from glustolibs.gluster.volume_ops import (volume_create, volume_start,
+ get_volume_list)
+from glustolibs.gluster.volume_libs import cleanup_volume
+from glustolibs.gluster.lib_utils import get_servers_bricks_dict
+from glustolibs.gluster.gluster_init import restart_glusterd
+from glustolibs.gluster.peer_ops import wait_for_peers_to_connect
+
+
+class TestReservedPortRangeForGluster(GlusterBaseClass):
+ def tearDown(self):
+        # Reset the port range if the test changed it and failed midway
+        if getattr(self, 'port_range_changed', False):
+            cmd = "sed -i 's/49200/60999/' /etc/glusterfs/glusterd.vol"
+            ret, _, _ = g.run(self.mnode, cmd)
+            if ret:
+                raise ExecutionError("Failed to set the max-port back to"
+                                     " 60999 in glusterd.vol file")
+
+ # clean up all volumes
+ vol_list = get_volume_list(self.mnode)
+ if vol_list is None:
+ raise ExecutionError("Failed to get the volume list")
+
+ for volume in vol_list:
+ ret = cleanup_volume(self.mnode, volume)
+ if not ret:
+ raise ExecutionError("Unable to delete volume %s" % volume)
+ g.log.info("Volume deleted successfully : %s", volume)
+
+ # Calling baseclass tearDown method
+ self.get_super_method(self, 'tearDown')()
+
+ def test_reserved_port_range_for_gluster(self):
+ """
+ Test Case:
+ 1) Set the max-port option in glusterd.vol file to 49200
+ 2) Restart glusterd on one of the node
+ 3) Create 50 volumes in a loop
+ 4) Try to start the 50 volumes in a loop
+ 5) Confirm that the 50th volume failed to start
+ 6) Confirm the error message, due to which volume failed to start
+ 7) Set the max-port option in glusterd.vol file back to default value
+ 8) Restart glusterd on the same node
+ 9) Starting the 50th volume should succeed now
+ """
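+        # Port arithmetic behind this test (illustrative sketch; the
+        # base-port of 49152 is gluster's documented default): capping
+        # max-port at 49200 leaves 49 usable brick ports per node, and
+        # each volume below places one brick on every node, so the 50th
+        # volume's bricks cannot get a port.
+        base_port, capped_max_port = 49152, 49200
+        assert capped_max_port - base_port + 1 == 49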
+ # Set max port number as 49200 in glusterd.vol file
+ cmd = "sed -i 's/60999/49200/' /etc/glusterfs/glusterd.vol"
+ ret, _, _ = g.run(self.mnode, cmd)
+ self.assertEqual(ret, 0, "Failed to set the max-port to 49200 in"
+ " glusterd.vol file")
+
+ self.port_range_changed = True
+
+ # Restart glusterd
+ ret = restart_glusterd(self.mnode)
+ self.assertTrue(ret, "Failed to restart glusterd")
+ g.log.info("Successfully restarted glusterd on node: %s", self.mnode)
+
+ # Check node on which glusterd was restarted is back to 'Connected'
+ # state from any other peer
+ ret = wait_for_peers_to_connect(self.servers[1], self.servers)
+ self.assertTrue(ret, "All the peers are not in connected state")
+
+ # Fetch the available bricks dict
+ bricks_dict = get_servers_bricks_dict(self.servers,
+ self.all_servers_info)
+ self.assertIsNotNone(bricks_dict, "Failed to get the bricks dict")
+
+ # Create 50 volumes in a loop
+ for i in range(1, 51):
+ self.volname = "volume-%d" % i
+ bricks_list = []
+ j = 0
+ for key, value in bricks_dict.items():
+ j += 1
+ brick = choice(value)
+ brick = "{}:{}/{}_brick-{}".format(key, brick,
+ self.volname, j)
+ bricks_list.append(brick)
+
+ ret, _, _ = volume_create(self.mnode, self.volname, bricks_list)
+ self.assertEqual(ret, 0, "Failed to create volume: %s"
+ % self.volname)
+ g.log.info("Successfully created volume: %s", self.volname)
+
+ # Try to start 50 volumes in loop
+ for i in range(1, 51):
+ self.volname = "volume-%d" % i
+ ret, _, err = volume_start(self.mnode, self.volname)
+ if ret:
+ break
+ g.log.info("Successfully started all the volumes until volume: %s",
+ self.volname)
+
+        # Confirm that volumes 1 to 49 started and the 50th failed
+        self.assertEqual(i, 50, "Unexpected: volume-%d failed to start;"
+                         " expected volume-1 to volume-49 to start" % i)
+
+ # Confirm the error message on volume start fail
+ err_msg = ("volume start: volume-50: failed: Commit failed on"
+ " localhost. Please check log file for details.")
+ self.assertEqual(err.strip(), err_msg, "Volume start failed with"
+ " a different error message")
+
+        # Confirm the error message from the log file
+        cmd = ("grep -i 'All the ports in the range are exhausted' "
+               "/var/log/glusterfs/glusterd.log | wc -l")
+        ret, out, _ = g.run(self.mnode, cmd)
+        self.assertEqual(ret, 0, "Failed to 'grep' the glusterd.log file")
+        self.assertNotEqual(out.strip(), "0", "Volume start didn't fail"
+                            " with expected error message")
+
+ # Set max port number back to default value in glusterd.vol file
+ cmd = "sed -i 's/49200/60999/' /etc/glusterfs/glusterd.vol"
+ ret, _, _ = g.run(self.mnode, cmd)
+ self.assertEqual(ret, 0, "Failed to set the max-port back to 60999 in"
+ " glusterd.vol file")
+
+ self.port_range_changed = False
+
+ # Restart glusterd on the same node
+ ret = restart_glusterd(self.mnode)
+ self.assertTrue(ret, "Failed to restart glusterd")
+ g.log.info("Successfully restarted glusterd on node: %s", self.mnode)
+
+ # Starting the 50th volume should succeed now
+ self.volname = "volume-%d" % i
+ ret, _, _ = volume_start(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "Failed to start volume: %s" % self.volname)
diff --git a/tests/functional/glusterd/test_restart_glusterd_while_rebalancing.py b/tests/functional/glusterd/test_restart_glusterd_while_rebalancing.py
index c1288f9fe..f4f082311 100644
--- a/tests/functional/glusterd/test_restart_glusterd_while_rebalancing.py
+++ b/tests/functional/glusterd/test_restart_glusterd_while_rebalancing.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2018-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -18,8 +18,9 @@
Test restart glusterd while rebalance is in progress
"""
-from time import sleep
+
from glusto.core import Glusto as g
+
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.volume_libs import form_bricks_list_to_add_brick
@@ -27,7 +28,10 @@ from glustolibs.gluster.brick_ops import add_brick
from glustolibs.gluster.rebalance_ops import (rebalance_start,
get_rebalance_status)
from glustolibs.gluster.gluster_init import (restart_glusterd,
- is_glusterd_running)
+ wait_for_glusterd_to_start,
+ is_glusterd_running,
+ start_glusterd)
+from glustolibs.gluster.peer_ops import wait_for_peers_to_connect
from glustolibs.io.utils import validate_io_procs
from glustolibs.misc.misc_libs import upload_scripts
from glustolibs.gluster.glusterdir import get_dir_contents
@@ -40,16 +44,14 @@ class TestRestartGlusterdWhileRebalance(GlusterBaseClass):
@classmethod
def setUpClass(cls):
cls.counter = 1
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
        # Uploading file_dir script in all client directories
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts to clients %s"
% cls.clients)
@@ -83,24 +85,25 @@ class TestRestartGlusterdWhileRebalance(GlusterBaseClass):
self.volname)
# calling GlusterBaseClass setUp
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
def tearDown(self):
"""
tearDown for every test
"""
+ ret = is_glusterd_running(self.servers)
+ if ret:
+ ret = start_glusterd(self.servers)
+ if not ret:
+ raise ExecutionError("Failed to start glusterd on %s"
+ % self.servers)
+ g.log.info("Glusterd started successfully on %s", self.servers)
# checking for peer status from every node
- count = 0
- while count < 80:
- ret = self.validate_peers_are_connected()
- if ret:
- break
- sleep(2)
- count += 1
-
- if not ret:
- raise ExecutionError("Servers are not in peer probed state")
+ for server in self.servers:
+ ret = wait_for_peers_to_connect(server, self.servers)
+ if not ret:
+ raise ExecutionError("Servers are not in peer probed state")
# unmounting the volume and Cleaning up the volume
ret = self.unmount_volume_and_cleanup_volume(self.mounts)
@@ -127,7 +130,7 @@ class TestRestartGlusterdWhileRebalance(GlusterBaseClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 4 "
"--dir-length 6 "
@@ -176,13 +179,7 @@ class TestRestartGlusterdWhileRebalance(GlusterBaseClass):
g.log.info("Glusterd restarted successfully on %s", self.servers)
# Checking glusterd status
- count = 0
- while count < 60:
- ret = is_glusterd_running(self.servers)
- if not ret:
- break
- sleep(2)
- count += 1
- self.assertEqual(ret, 0, "Glusterd is not running on some of the "
- "servers")
+ ret = wait_for_glusterd_to_start(self.servers)
+ self.assertTrue(ret, "Glusterd is not running on some of the "
+ "servers")
g.log.info("Glusterd is running on all servers %s", self.servers)
diff --git a/tests/functional/glusterd/test_self_heal_quota_daemon_after_reboot.py b/tests/functional/glusterd/test_self_heal_quota_daemon_after_reboot.py
new file mode 100644
index 000000000..f1a523ce6
--- /dev/null
+++ b/tests/functional/glusterd/test_self_heal_quota_daemon_after_reboot.py
@@ -0,0 +1,176 @@
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+ Description:
+    Test cases in this module check the self-heal daemon and
+    quota daemon status after a node reboot.
+"""
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.quota_ops import quota_enable, quota_limit_usage
+from glustolibs.gluster.volume_ops import get_volume_status
+from glustolibs.misc.misc_libs import reboot_nodes_and_wait_to_come_online
+from glustolibs.gluster.gluster_init import (is_glusterd_running,
+ start_glusterd,
+ wait_for_glusterd_to_start)
+from glustolibs.gluster.peer_ops import wait_for_peers_to_connect
+
+
+@runs_on([['replicated', 'distributed-replicated'], ['glusterfs']])
+class TestSelfHealDaemonQuotaDaemonAfterReboot(GlusterBaseClass):
+ def setUp(self):
+ self.get_super_method(self, 'setUp')()
+
+ # checking for peer status from every node
+ ret = self.validate_peers_are_connected()
+ if not ret:
+ raise ExecutionError("Servers are not in peer probed state")
+
+ # Creating Volume
+ g.log.info("Started creating volume")
+ ret = self.setup_volume_and_mount_volume(self.mounts)
+ if not ret:
+ raise ExecutionError("Volume creation failed: %s" % self.volname)
+ g.log.info("Volume created successfully : %s", self.volname)
+
+ def tearDown(self):
+ ret = is_glusterd_running(self.servers)
+ if ret:
+ ret = start_glusterd(self.servers)
+ if not ret:
+ raise ExecutionError("Failed to start glusterd on %s"
+ % self.servers)
+ g.log.info("Glusterd started successfully on %s", self.servers)
+
+ # checking for peer status from every node
+ ret = wait_for_peers_to_connect(self.mnode, self.servers)
+ if not ret:
+ raise ExecutionError("Peers are not in connected state")
+
+ # stopping the volume and Cleaning up the volume
+ ret = self.unmount_volume_and_cleanup_volume(self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to Cleanup the Volume %s"
+ % self.volname)
+ g.log.info("Volume deleted successfully %s", self.volname)
+
+ def is_daemon_process_running(self):
+ """
+        Check self-heal daemon and quota daemon processes on all nodes.
+ """
+ vol_status_shd_pid_list = []
+ vol_status_quotad_pid_list = []
+ g.log.info("Total self-heal and quota daemon process should be %d for "
+ "%d nodes", len(self.servers) * 2, len(self.servers))
+
+        # Getting vol status in dictionary format
+ vol_status = get_volume_status(self.mnode, self.volname)
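+        # The returned structure is a nested dict, roughly of the shape
+        # (illustrative):
+        #   {volname: {host: {'Self-heal Daemon': {'pid': ...},
+        #                     'Quota Daemon': {'pid': ...}, ...}}}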
+
+ # Getting self-heal daemon and quota daemon pids of every host from
+ # gluster volume status
+ for server in self.servers:
+ vol_status_quotad_pid_list.append(
+ vol_status[self.volname][server]['Quota Daemon']['pid'])
+ vol_status_shd_pid_list.append(
+ vol_status[self.volname][server]['Self-heal Daemon']['pid'])
+
+ g.log.info("shd list from get volume status: %s",
+ vol_status_shd_pid_list)
+ g.log.info("quotad list from get volume status: %s",
+ vol_status_quotad_pid_list)
+
+ sh_daemon_list = []
+ quotad_list = []
+
+        # Collect every host's self-heal daemon pid into sh_daemon_list
+        # and every host's quota daemon pid into quotad_list, using the
+        # ps command
+ for daemon_name, daemon_list in (('glustershd', sh_daemon_list),
+ ('quotad', quotad_list)):
+ for host in self.servers:
+ cmd = "ps -eaf |grep %s |grep -v grep | awk '{ print $2 }'" % (
+ daemon_name)
+ ret, out, err = g.run(host, cmd)
+ err_msg = (
+ "Failed to find '%s' daemon on the '%s' host using "
+ "'ps -eaf' command.\nret: %s\nout: %s\nerr: %s" % (
+ daemon_name, host, ret, out, err)
+ )
+ self.assertEqual(ret, 0, err_msg)
+ daemon_list.append(out.strip())
+
+ g.log.info("shd list :%s", sh_daemon_list)
+ g.log.info("quotad list :%s", quotad_list)
+
+        # Check that the quota daemon and self-heal daemon are running on
+        # all hosts: the pid lists collected via ps must match the pid
+        # lists from the volume status command
+ if sorted(sh_daemon_list + quotad_list) == sorted(
+ vol_status_shd_pid_list + vol_status_quotad_pid_list):
+ return len(sh_daemon_list + quotad_list) == len(self.servers) * 2
+
+ return False
+
+ def test_daemons_after_reboot(self):
+ '''
+        Create a volume and FUSE mount it, enable quota on the volume
+        and set a quota limit, then reboot a node and check whether the
+        self-heal daemon and quota daemon are running after the reboot
+ '''
+
+ # Enabling quota to volume
+ ret, _, _ = quota_enable(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "Failed to enable quota on volume : "
+ "%s" % self.volname)
+ g.log.info("quota enabled successfully on volume: %s", self.volname)
+
+ # Setting quota limit to volume
+ ret, _, _ = quota_limit_usage(
+ self.mnode,
+ self.volname,
+ path='/',
+ limit='1GB',
+ soft_limit='')
+ self.assertEqual(ret, 0, "Quota limit set failed "
+ "on volume : %s" % self.volname)
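+        # Behavioral note (descriptive addition): enabling quota starts
+        # a quotad process on every node, so after the reboot below both
+        # glustershd and quotad are expected to come back along with
+        # glusterd.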
+
+ ret, _ = reboot_nodes_and_wait_to_come_online(self.servers[1])
+ self.assertTrue(ret, "Failed to reboot the node %s"
+ % self.servers[1])
+ g.log.info("Node %s rebooted successfully", self.servers[1])
+
+        # Checking glusterd status and peer status after reboot of server
+ self.assertTrue(
+ wait_for_glusterd_to_start(self.servers[1]),
+ "Failed to start glusterd on %s" % self.servers[1])
+ self.assertTrue(
+ wait_for_peers_to_connect(self.mnode, self.servers),
+ "some peers are not in connected state")
+ g.log.info("glusterd is running and all peers are in "
+ "connected state")
+
+ # Checks self heal daemon and quota daemon process running or not
+ ret = self.is_daemon_process_running()
+        self.assertTrue(ret, "Failed: self-heal and quota daemon processes"
+                        " are not running on all hosts")
+        g.log.info("Self-heal and quota daemons are running on all "
+                   "hosts successfully")
diff --git a/tests/functional/glusterd/test_setting_volume_option_when_one_node_is_down_in_cluster.py b/tests/functional/glusterd/test_setting_volume_option_when_one_node_is_down_in_cluster.py
index 1fb3b5171..c1f11f3a2 100644
--- a/tests/functional/glusterd/test_setting_volume_option_when_one_node_is_down_in_cluster.py
+++ b/tests/functional/glusterd/test_setting_volume_option_when_one_node_is_down_in_cluster.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -22,8 +22,10 @@ from time import sleep
from glusto.core import Glusto as g
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.exceptions import ExecutionError
-from glustolibs.gluster.volume_ops import set_volume_options, get_volume_info
-from glustolibs.gluster.gluster_init import start_glusterd, is_glusterd_running
+from glustolibs.gluster.volume_ops import (
+ set_volume_options, get_volume_info)
+from glustolibs.gluster.gluster_init import (
+ start_glusterd, wait_for_glusterd_to_start)
from glustolibs.gluster.volume_libs import setup_volume
from glustolibs.gluster.peer_ops import (peer_probe_servers,
peer_detach_servers,
@@ -34,8 +36,7 @@ from glustolibs.gluster.peer_ops import (peer_probe_servers,
class VolumeInfoSync(GlusterBaseClass):
def setUp(self):
-
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
# check whether peers are in connected state
ret = self.validate_peers_are_connected()
@@ -59,7 +60,7 @@ class VolumeInfoSync(GlusterBaseClass):
raise ExecutionError("Failed to probe detached "
"servers %s" % self.servers)
- # stopping the volume and Cleaning up the volume
+ # stopping the volume and Cleaning up the volume
ret = self.cleanup_volume()
if not ret:
raise ExecutionError("Failed to Cleanup the Volume %s"
@@ -67,7 +68,7 @@ class VolumeInfoSync(GlusterBaseClass):
g.log.info("Volume deleted successfully : %s", self.volname)
# calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_sync_functinality(self):
@@ -105,15 +106,9 @@ class VolumeInfoSync(GlusterBaseClass):
ret = start_glusterd(random_server)
self.assertTrue(ret, "Failed to start glusterd on %s" % random_server)
- count = 0
- while count < 60:
- ret = is_glusterd_running(random_server)
- if not ret:
- break
- sleep(2)
- count += 1
- self.assertEqual(ret, 0, "glusterd is not running on %s"
- % random_server)
+ ret = wait_for_glusterd_to_start(random_server)
+ self.assertTrue(ret, "glusterd is not running on %s"
+ % random_server)
g.log.info("glusterd is started and running on %s", random_server)
# volume info should be synced across the cluster
@@ -153,15 +148,9 @@ class VolumeInfoSync(GlusterBaseClass):
ret = start_glusterd(random_server)
self.assertTrue(ret, "Failed to start glusterd on %s" % random_server)
- count = 0
- while count < 60:
- ret = is_glusterd_running(random_server)
- if not ret:
- break
- sleep(2)
- count += 1
- self.assertEqual(ret, 0, "glusterd is not running on %s"
- % random_server)
+ ret = wait_for_glusterd_to_start(random_server)
+ self.assertTrue(ret, "glusterd is not running on %s"
+ % random_server)
g.log.info("glusterd is started and running on %s", random_server)
# peer status should be synced across the cluster
diff --git a/tests/functional/glusterd/test_setting_volume_option_with_more_than_4096_characters.py b/tests/functional/glusterd/test_setting_volume_option_with_more_than_4096_characters.py
index d6c517923..64b968bb4 100644
--- a/tests/functional/glusterd/test_setting_volume_option_with_more_than_4096_characters.py
+++ b/tests/functional/glusterd/test_setting_volume_option_with_more_than_4096_characters.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -14,22 +14,23 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-from time import sleep
from glusto.core import Glusto as g
from glustolibs.gluster.gluster_base_class import runs_on, GlusterBaseClass
from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.peer_ops import wait_for_peers_to_connect
from glustolibs.gluster.volume_libs import setup_volume
from glustolibs.gluster.volume_ops import set_volume_options
from glustolibs.gluster.gluster_init import (restart_glusterd,
- is_glusterd_running)
+ wait_for_glusterd_to_start,
+ is_glusterd_running,
+ start_glusterd)
@runs_on([['distributed'], ['glusterfs']])
class TestVolumeOptionSetWithMaxcharacters(GlusterBaseClass):
def setUp(self):
-
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
# check whether peers are in connected state
ret = self.validate_peers_are_connected()
@@ -38,16 +39,17 @@ class TestVolumeOptionSetWithMaxcharacters(GlusterBaseClass):
def tearDown(self):
- count = 0
- while count < 60:
- ret = self.validate_peers_are_connected()
- if ret:
- break
- sleep(2)
- count += 1
+ ret = is_glusterd_running(self.servers)
+ if ret:
+ ret = start_glusterd(self.servers)
+ if not ret:
+ raise ExecutionError("Failed to start glusterd on %s"
+ % self.servers)
+ g.log.info("Glusterd started successfully on %s", self.servers)
- if not ret:
- raise ExecutionError("Peers are not in connected state")
+ ret = wait_for_peers_to_connect(self.mnode, self.servers)
+        if not ret:
+            raise ExecutionError("Peers of %s are not in connected state"
+                                 % self.mnode)
# stopping the volume and Cleaning up the volume
ret = self.cleanup_volume()
@@ -56,7 +58,7 @@ class TestVolumeOptionSetWithMaxcharacters(GlusterBaseClass):
% self.volname)
g.log.info("Volume deleted successfully : %s", self.volname)
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_setting_vol_option_with_max_characters(self):
@@ -92,11 +94,9 @@ class TestVolumeOptionSetWithMaxcharacters(GlusterBaseClass):
ret = restart_glusterd(self.mnode)
self.assertTrue(ret, "Failed to restart the glusterd on %s"
% self.mnode)
- count = 0
- while count < 60:
- ret = is_glusterd_running(self.mnode)
- if not ret:
- break
- sleep(2)
- count += 1
- self.assertEqual(ret, 0, "glusterd is not running on %s" % self.mnode)
+
+ ret = wait_for_glusterd_to_start(self.servers)
+ self.assertTrue(ret, "glusterd is not running on %s"
+ % self.servers)
+        g.log.info("Glusterd started successfully on the nodes: %s",
+                   self.servers)
diff --git a/tests/functional/glusterd/test_shared_storage.py b/tests/functional/glusterd/test_shared_storage.py
new file mode 100644
index 000000000..63e996fc6
--- /dev/null
+++ b/tests/functional/glusterd/test_shared_storage.py
@@ -0,0 +1,247 @@
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+ Description:
+ Test cases in this module related glusterd enabling and
+ disabling shared storage
+"""
+
+from random import choice
+from time import sleep
+from glusto.core import Glusto as g
+from glustolibs.gluster.brick_libs import get_all_bricks
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.lib_utils import form_bricks_list
+from glustolibs.gluster.shared_storage_ops import (enable_shared_storage,
+ is_shared_volume_mounted,
+ disable_shared_storage,
+ check_gluster_shared_volume)
+from glustolibs.gluster.volume_ops import (volume_create,
+ volume_delete, get_volume_list)
+from glustolibs.gluster.volume_libs import cleanup_volume
+from glustolibs.misc.misc_libs import reboot_nodes_and_wait_to_come_online
+
+
+@runs_on([['distributed'], ['glusterfs', 'nfs']])
+class SharedStorage(GlusterBaseClass):
+
+ def setUp(self):
+ # calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+ # Creating Volume
+ if not self.setup_volume():
+ raise ExecutionError("Volume creation failed")
+
+ def tearDown(self):
+ # Stopping and cleaning up the volume
+ vol_list = get_volume_list(self.mnode)
+ if vol_list is None:
+ raise ExecutionError("Failed to get volume list")
+
+ for volume in vol_list:
+ if not cleanup_volume(self.mnode, volume):
+ raise ExecutionError("Failed Cleanup the Volume")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def _enable_and_check_shared_storage(self):
+ """Enable and check shared storage is present"""
+
+ ret = enable_shared_storage(self.mnode)
+        self.assertTrue(ret, "Failed to enable shared storage")
+ g.log.info("Successfully enabled: enable-shared-storage option")
+
+ # Check volume list to confirm gluster_shared_storage is created
+ ret = check_gluster_shared_volume(self.mnode)
+ self.assertTrue(ret, ("gluster_shared_storage volume not"
+ " created even after enabling it"))
+ g.log.info("gluster_shared_storage volume created"
+ " successfully")
+
+ def _disable_and_check_shared_storage(self):
+ """Disable a shared storage without specifying the domain and check"""
+
+ ret = disable_shared_storage(self.mnode)
+        self.assertTrue(ret, "Failed to disable shared storage")
+ g.log.info("Successfully disabled: disable-shared-storage")
+
+ # Check volume list to confirm gluster_shared_storage is deleted
+ ret = check_gluster_shared_volume(self.mnode, present=False)
+ self.assertTrue(ret, ("gluster_shared_storage volume not"
+ " deleted even after disabling it"))
+ g.log.info("gluster_shared_storage volume deleted"
+ " successfully")
+
+ def _is_shared_storage_mounted_on_the_nodes(self, brick_details, mounted):
+ """
+ Checks if the shared storage is mounted on the nodes where it is
+ created.
+ """
+ for node in brick_details:
+ ret = is_shared_volume_mounted(node.split(":")[0])
+ if mounted:
+ self.assertTrue(ret, ("Shared volume not mounted even after"
+ " enabling it"))
+ g.log.info("Shared volume mounted successfully")
+ else:
+ self.assertFalse(ret, ("Shared volume not unmounted even"
+ " after disabling it"))
+ g.log.info("Shared volume unmounted successfully")
+
+ def _get_all_bricks(self):
+ """Get all bricks where the shared storage is mounted"""
+
+ brick_list = get_all_bricks(self.mnode, "gluster_shared_storage")
+ self.assertIsNotNone(brick_list, "Unable to fetch brick list of shared"
+ " storage")
+ return brick_list
+
+ def _shared_storage_test_without_node_reboot(self):
+        """Shared storage test steps up to the node reboot scenario"""
+
+ # Enable shared storage and check it is present on the cluster
+ self._enable_and_check_shared_storage()
+
+ # Get all the bricks where shared storage is mounted
+ brick_list = self._get_all_bricks()
+
+ # Check the shared volume is mounted on the nodes where it is created
+ self._is_shared_storage_mounted_on_the_nodes(brick_details=brick_list,
+ mounted=True)
+ # Disable shared storage and check it is not present on the cluster
+ self._disable_and_check_shared_storage()
+
+ # Check the shared volume is unmounted on the nodes where it is created
+ self._is_shared_storage_mounted_on_the_nodes(brick_details=brick_list,
+ mounted=False)
+
+ # Create a volume with name gluster_shared_storage
+ volume = "gluster_shared_storage"
+ bricks_list = form_bricks_list(self.mnode, volume, 2, self.servers,
+ self.all_servers_info)
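+        # The create below is retried because disabling shared storage
+        # tears the old gluster_shared_storage volume down
+        # asynchronously; reusing the name can fail until that cleanup
+        # finishes (rationale note, an inference rather than documented
+        # behavior).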
+ count = 0
+ while count < 20:
+ ret, _, _ = volume_create(self.mnode, volume, bricks_list, True)
+ if not ret:
+ break
+ sleep(2)
+ count += 1
+ self.assertEqual(ret, 0, "Failed to create volume")
+        g.log.info("Volume created successfully")
+
+ # Disable the shared storage should fail
+ ret = disable_shared_storage(self.mnode)
+ self.assertFalse(ret, ("Unexpected: Successfully disabled"
+ " shared-storage"))
+ g.log.info("Volume set: failed as expected")
+
+        # Check volume list to confirm the gluster_shared_storage volume
+        # created above is not deleted
+        vol_list = get_volume_list(self.mnode)
+        self.assertIn("gluster_shared_storage", vol_list,
+                      "gluster_shared_storage volume got deleted"
+                      " after disabling it")
+        g.log.info("gluster_shared_storage volume not deleted as"
+                   " expected after disabling enable-shared-storage")
+
+ # Delete the volume created
+        ret = volume_delete(self.mnode, volume)
+        self.assertTrue(ret, "Failed to cleanup the volume %s" % volume)
+        g.log.info("Volume deleted successfully : %s", volume)
+
+ # Enable shared storage and check it is present on the cluster
+ self._enable_and_check_shared_storage()
+
+ # Check the shared volume is mounted on the nodes where it is created
+ self._is_shared_storage_mounted_on_the_nodes(brick_details=brick_list,
+ mounted=True)
+
+ # Disable shared storage and check it is not present on the cluster
+ self._disable_and_check_shared_storage()
+
+ # Check the shared volume is unmounted on the nodes where it is created
+ self._is_shared_storage_mounted_on_the_nodes(brick_details=brick_list,
+ mounted=False)
+
+ def test_shared_storage(self):
+ """
+        This test case includes:
+        -> Enable shared storage
+        -> Disable shared storage
+        -> Create a volume of any type with the name
+           gluster_shared_storage
+        -> Disable the shared storage
+        -> Check that the volume created in the previous step is not
+           deleted
+        -> Delete the volume
+        -> Enable the shared storage
+        -> Check that a volume with the name gluster_shared_storage is
+           created
+        -> Disable the shared storage
+        -> Enable shared storage and validate whether it is mounted
+        -> Perform a node reboot
+        -> Post reboot, validate that the bricks are mounted back
+ """
+ # pylint: disable=too-many-statements, too-many-branches
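+        # Background (descriptive addition): enabling
+        # cluster.enable-shared-storage creates a replicated volume named
+        # gluster_shared_storage and mounts it on the peer nodes,
+        # typically at /var/run/gluster/shared_storage; the helper
+        # methods above rely on glustolibs to check both the volume and
+        # the mount.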
+ self._shared_storage_test_without_node_reboot()
+
+ # Enable shared storage and check it is present on the cluster
+ self._enable_and_check_shared_storage()
+
+ # Get all the bricks where shared storage is mounted
+ brick_list = self._get_all_bricks()
+
+ # Check the shared volume is mounted on the nodes where it is created
+ self._is_shared_storage_mounted_on_the_nodes(brick_details=brick_list,
+ mounted=True)
+
+ # Perform node reboot on any of the nodes where the shared storage is
+ # mounted
+ node_to_reboot = choice(brick_list)
+ node_to_reboot = node_to_reboot.split(":")[0]
+        ret, _ = reboot_nodes_and_wait_to_come_online(node_to_reboot)
+        self.assertTrue(ret, "Reboot failed on node: "
+                        "{}".format(node_to_reboot))
+ g.log.info("Node: %s rebooted successfully", node_to_reboot)
+
+ # Post reboot checking peers are connected
+ count = 0
+ while count < 10:
+ ret = self.validate_peers_are_connected()
+ if ret:
+ break
+ sleep(3)
+ count += 1
+ self.assertTrue(ret, "Peers are not in connected state.")
+
+ # Check the shared volume is mounted on the nodes where it is created
+ self._is_shared_storage_mounted_on_the_nodes(brick_details=brick_list,
+ mounted=True)
+
+ # Disable shared storage and check it is not present on the cluster
+ self._disable_and_check_shared_storage()
+
+ # Check the shared volume is unmounted on the nodes where it is created
+ self._is_shared_storage_mounted_on_the_nodes(brick_details=brick_list,
+ mounted=False)
diff --git a/tests/functional/glusterd/test_status_string_in_volstatus.py b/tests/functional/glusterd/test_status_string_in_volstatus.py
index 0382e7212..b43fb90e3 100644
--- a/tests/functional/glusterd/test_status_string_in_volstatus.py
+++ b/tests/functional/glusterd/test_status_string_in_volstatus.py
@@ -37,7 +37,7 @@ class TestTaskTypeAndStatus(GlusterBaseClass):
setUp method for every test
"""
# calling GlusterBaseClass setUp
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
# Creating Volume
ret = self.setup_volume()
@@ -57,7 +57,7 @@ class TestTaskTypeAndStatus(GlusterBaseClass):
g.log.info("Volume deleted successfully : %s", self.volname)
# Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_status_string(self):
'''
diff --git a/tests/functional/glusterd/test_updates_in_options_file_on_quorum_changes.py b/tests/functional/glusterd/test_updates_in_options_file_on_quorum_changes.py
new file mode 100644
index 000000000..98a3ba53f
--- /dev/null
+++ b/tests/functional/glusterd/test_updates_in_options_file_on_quorum_changes.py
@@ -0,0 +1,94 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+ Test Description:
+ Tests to check the 'options' file is updated with quorum changes
+"""
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.volume_ops import set_volume_options
+
+
+@runs_on([['distributed', 'replicated', 'distributed-replicated',
+ 'dispersed', 'distributed-dispersed',
+ 'arbiter', 'distributed-arbiter'], ['glusterfs']])
+class TestUpdatesInOptionsFileOnQuorumChanges(GlusterBaseClass):
+ def setUp(self):
+ # calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+
+ # Setting up Volume
+ ret = self.setup_volume()
+ if not ret:
+ raise ExecutionError("Volume creation/start failed: %s"
+ % self.volname)
+        g.log.info("Volume created and started successfully : %s",
+                   self.volname)
+
+ def tearDown(self):
+ # stopping the volume and Cleaning up the volume
+ ret = self.cleanup_volume()
+ if not ret:
+ raise ExecutionError("Failed Cleanup the Volume %s" % self.volname)
+ g.log.info("Volume deleted successfully : %s", self.volname)
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def test_updates_in_options_file_on_quorum_changes(self):
+ """
+ Test Case:
+ 1. Create and start a volume
+ 2. Check the output of '/var/lib/glusterd/options' file
+ 3. Store the value of 'global-option-version'
+ 4. Set server-quorum-ratio to 70%
+ 5. Check the output of '/var/lib/glusterd/options' file
+ 6. Compare the value of 'global-option-version' and check
+ if the value of 'server-quorum-ratio' is set to 70%
+ """
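+        # The options file is a small key=value store, e.g.
+        # (illustrative contents, values vary per cluster):
+        #   global-option-version=2
+        #   cluster.server-quorum-ratio=70%
+        # Every cluster-wide 'volume set all ...' is expected to bump
+        # global-option-version by one, which is what this test asserts.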
+        # Checking the 'options' file for the global-option-version entry
+        cmd = "grep global-option-version /var/lib/glusterd/options"
+        ret, out, _ = g.run(self.mnode, cmd)
+        self.assertEqual(ret, 0, "Failed to read global-option-version"
+                         " from the options file")
+        previous_global_option_version = out.split('=')
+
+ # Setting Quorum ratio in percentage
+ self.quorum_perecent = {'cluster.server-quorum-ratio': '70%'}
+ ret = set_volume_options(self.mnode, 'all', self.quorum_perecent)
+ self.assertTrue(ret, "Failed to set cluster.server-quorum-ratio"
+ " option on volumes")
+ g.log.info("Successfully set cluster.server-quorum-ratio on cluster")
+
+        # Checking the 'options' file again after the quorum change
+        cmd = "grep global-option-version /var/lib/glusterd/options"
+        ret, out, _ = g.run(self.mnode, cmd)
+        self.assertEqual(ret, 0, "Failed to read global-option-version"
+                         " from the options file")
+        new_global_option_version = out.split('=')
+        self.assertEqual(int(previous_global_option_version[1]) + 1,
+                         int(new_global_option_version[1]),
+                         "Failed: the global-option-version didn't change"
+                         " on a volume set operation")
+        g.log.info("The global-option-version was successfully updated in"
+                   " the options file")
+
+        cmd = "grep server-quorum-ratio /var/lib/glusterd/options"
+        ret, out, _ = g.run(self.mnode, cmd)
+ out = out.split("%")
+ self.assertEqual(out[0], "cluster.server-quorum-ratio=70",
+ "Server-quorum-ratio is not updated in options file")
+ g.log.info("The cluster.server-quorum-ratio was successfully set"
+ " to 70 in the options file")
diff --git a/tests/functional/glusterd/test_validate_auth_allow_and_auth_reject.py b/tests/functional/glusterd/test_validate_auth_allow_and_auth_reject.py
new file mode 100644
index 000000000..f80b4357b
--- /dev/null
+++ b/tests/functional/glusterd/test_validate_auth_allow_and_auth_reject.py
@@ -0,0 +1,162 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+ Test Description:
+ Tests to validate auth.allow and auth.reject on a volume
+"""
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.volume_ops import (set_volume_options,
+ volume_reset)
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.mount_ops import (mount_volume, umount_volume,
+ is_mounted)
+
+
+@runs_on([['distributed', 'replicated', 'distributed-replicated',
+ 'dispersed', 'distributed-dispersed', 'arbiter',
+ 'distributed-arbiter'], ['glusterfs']])
+class TestValidateAuthAllowAndAuthReject(GlusterBaseClass):
+
+ def setUp(self):
+ # calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+
+ ret = self.setup_volume()
+ if not ret:
+ raise ExecutionError("Volume creation failed: %s"
+ % self.volname)
+ g.log.info("Volume created successfully : %s", self.volname)
+
+ def tearDown(self):
+ # Cleanup the volume
+ ret = self.cleanup_volume()
+ if not ret:
+ raise ExecutionError("Failed to cleanup the volume %s"
+ % self.volname)
+ g.log.info("Volume deleted successfully: %s", self.volname)
+
+ self.get_super_method(self, 'tearDown')()
+
+ def _set_option_and_mount_and_unmount_volumes(self, option="",
+ is_allowed=True):
+ """
+ Setting volume option and then mounting and unmounting the volume
+ """
+ # Check if an option is passed
+ if option:
+ # Setting the option passed as an argument
+ ret = set_volume_options(self.mnode, self.volname,
+ {option: self.mounts[0].client_system})
+ self.assertTrue(ret, "Failed to set %s option in volume: %s"
+ % (option, self.volname))
+ g.log.info("Successfully set %s option in volume: %s", option,
+ self.volname)
+
+ # Mounting a volume
+ ret, _, _ = mount_volume(self.volname, mtype=self.mount_type,
+ mpoint=self.mounts[0].mountpoint,
+ mserver=self.mnode,
+ mclient=self.mounts[0].client_system)
+
+ # Checking if volume was successfully mounted or not
+ ret = is_mounted(self.volname, mtype=self.mount_type,
+ mpoint=self.mounts[0].mountpoint,
+ mserver=self.mnode,
+ mclient=self.mounts[0].client_system)
+ if is_allowed:
+ self.assertTrue(ret, "Failed to mount the volume: %s"
+ % self.volname)
+ else:
+ self.assertFalse(ret, "Unexpected: Mounting"
+ " the volume %s was successful" % self.volname)
+
+ # Unmount only if the volume is supposed to be mounted
+ if is_allowed:
+ ret, _, _ = umount_volume(self.mounts[0].client_system,
+ self.mounts[0].mountpoint,
+ mtype=self.mount_type)
+ self.assertEqual(ret, 0, "Failed to unmount the volume: %s"
+ % self.volname)
+
+ def _reset_the_volume(self):
+ """
+ Resetting the volume
+ """
+ ret = volume_reset(self.mnode, self.volname)
+ self.assertTrue(ret, "Failed to reset volume: %s" % self.volname)
+ g.log.info("Reseting volume %s was successful", self.volname)
+
+ def _check_validate_test(self):
+ """
+ Checking volume mounting and unmounting with auth.allow
+ and auth.reject option set for it
+ """
+ # Setting auth.allow option and then mounting and unmounting volume
+ self._set_option_and_mount_and_unmount_volumes("auth.allow")
+ g.log.info("Successfully performed the set, mounting and unmounting"
+ " operation as expected on volume: %s", self.volname)
+
+ # Resetting the volume options
+ self._reset_the_volume()
+
+ # Setting auth.reject option and then checking mounting of volume
+ self._set_option_and_mount_and_unmount_volumes("auth.reject", False)
+ g.log.info("Successfully performed the set and mounting operation"
+ "as expected on volume: %s", self.volname)
+
+ # Resetting the volume options
+ self._reset_the_volume()
+
+ # Check mounting and unmounting of volume without setting any options
+ self._set_option_and_mount_and_unmount_volumes()
+ g.log.info("Successfully mounted and unmounted the volume: %s",
+ self.volname)
+
+ def test_validate_auth_allow_and_auth_reject(self):
+ """
+ Test Case:
+ 1. Create and start a volume
+ 2. Disable brick multiplex
+ 3. Set auth.allow option on volume for the client address on which
+    volume is to be mounted
+ 4. Mount the volume on client and then unmount it.
+ 5. Reset the volume
+ 6. Set auth.reject option on volume for the client address on which
+    volume is to be mounted
+ 7. Mounting the volume should fail
+ 8. Reset the volume and mount it on client.
+ 9. Repeat steps 3-8 with brick multiplex enabled
+ """
+ # Setting cluster.brick-multiplex to disable
+ ret = set_volume_options(self.mnode, 'all',
+ {'cluster.brick-multiplex': 'disable'})
+ self.assertTrue(ret, "Failed to set brick-multiplex to enable.")
+ g.log.info("Successfully set brick-multiplex to disable.")
+
+ # Checking auth options with brick multiplex disabled
+ self._check_validate_test()
+
+ # Setting cluster.brick-multiplex to enable
+ ret = set_volume_options(self.mnode, 'all',
+ {'cluster.brick-multiplex': 'enable'})
+ self.assertTrue(ret, "Failed to set brick-multiplex to enable.")
+ g.log.info("Successfully set brick-multiplex to enable.")
+
+ # Checking auth options with brick multiplex enabled
+ self._check_validate_test()
diff --git a/tests/functional/glusterd/test_validate_glusterd_info.py b/tests/functional/glusterd/test_validate_glusterd_info.py
new file mode 100644
index 000000000..e888d5c03
--- /dev/null
+++ b/tests/functional/glusterd/test_validate_glusterd_info.py
@@ -0,0 +1,96 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass
+from glustolibs.gluster.peer_ops import get_peer_status
+
+
+class TestGlusterdInfo(GlusterBaseClass):
+
+ def test_validate_glusterd_info(self):
+ """
+ Steps:
+ 1. Check for the presence of /var/lib/glusterd/glusterd.info file
+ 2. Get the UUID of the current NODE
+ 3. check the value of the uuid returned by executing the command -
+ "gluster system:: uuid get "
+ 4. Check the uuid value shown by other node in the cluster
+ for the same node "gluster peer status"
+ on one node will give the UUID of the other node
+ """
+ uuid_list = []
+ for server in self.servers:
+
+ # Getting UUID from glusterd.info
+ g.log.info("Getting the UUID from glusterd.info")
+ ret, glusterd_volinfo, _ = g.run(
+ server, "grep -i uuid /var/lib/glusterd/glusterd.info")
+ uuid_list.append(glusterd_volinfo)
+ self.assertFalse(
+ ret, "Failed to read glusterd.info on '{}'".format(server))
+ glusterd_volinfo = (glusterd_volinfo.split("="))[1]
+ self.assertIsNotNone(
+ glusterd_volinfo, "UUID not found in 'glusterd.info' file")
+
+ # Getting UUID from cmd 'gluster system uuid get'
+ ret, get_uuid, _ = g.run(
+ server, "gluster system uuid get | awk {'print $2'}")
+ self.assertFalse(ret, "Unable to get the UUID ")
+ self.assertIsNotNone(get_uuid, "UUID not found")
+
+ # Checking if both the uuid are same
+ self.assertEqual(
+ glusterd_volinfo, get_uuid,
+ "UUID does not match in host {}".format(server))
+
+ # Getting the UUID from cmd "gluster peer status"
+ for node in self.servers:
+ for i in get_peer_status(node):
+ uuid_list.append(i["uuid"])
+ if server != node:
+ self.assertTrue(
+ get_uuid.replace("\n", "") in uuid_list,
+ "uuid not matched in {}".format(node))
+
+ def test_glusterd_config_file_check(self):
+ """
+ Steps:
+ 1. Check the location of glusterd socket file ( glusterd.socket )
+ ls /var/run/ | grep -i glusterd.socket
+ 2. systemctl is-enabled glusterd -> enabled
+
+ """
+
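+ # 'systemctl is-enabled' prints the unit state (e.g. 'enabled') and
+ # exits non-zero when the unit is not enabled.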
+ cmd = "ls /var/run/ | grep -i glusterd.socket"
+ ret, out, _ = g.run(self.mnode, cmd)
+
+ # Checking glusterd.socket file
+ self.assertFalse(
+ ret, "Failed to get glusterd.socket file on '{}'".format(
+ self.mnode))
+ self.assertEqual(
+ out.replace("\n", ""), "glusterd.socket",
+ "Failed to get expected output")
+
+ # Checking for glusterd.service is enabled by default
+ ret, out, _ = g.run(
+ self.mnode, "systemctl is-enabled glusterd.service")
+ self.assertFalse(
+ ret, "Failed to execute the cmd on {}".format(self.mnode))
+ self.assertEqual(
+ out.replace("\n", ""), "enabled",
+ "Output of systemctl is-enabled glusterd.service is not enabled")
diff --git a/tests/functional/glusterd/test_validate_peer_probe_ip_fqdn_hostname.py b/tests/functional/glusterd/test_validate_peer_probe_ip_fqdn_hostname.py
new file mode 100755
index 000000000..7c8fe3612
--- /dev/null
+++ b/tests/functional/glusterd/test_validate_peer_probe_ip_fqdn_hostname.py
@@ -0,0 +1,146 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from socket import gethostbyname, getfqdn
+from random import choice
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass
+from glustolibs.gluster.peer_ops import (peer_probe, peer_detach,
+ peer_probe_servers,
+ peer_detach_servers,
+ nodes_from_pool_list)
+from glustolibs.gluster.exceptions import ExecutionError
+
+
+# pylint: disable=unsubscriptable-object
+class TestPeerProbeScenarios(GlusterBaseClass):
+
+ def setUp(self):
+ self.get_super_method(self, 'setUp')()
+
+ # Performing peer detach
+ if not peer_detach_servers(self.mnode, self.servers):
+ raise ExecutionError("Failed to detach servers %s"
+ % self.servers)
+ g.log.info("Peer detach SUCCESSFUL.")
+ self.peers_in_pool = []
+ self.by_type = ""
+ self.node = None
+
+ def tearDown(self):
+ """Detach servers from cluster"""
+ pool = nodes_from_pool_list(self.mnode)
+ self.assertIsNotNone(pool, "Failed to get pool list")
+ for node in pool:
+ if not peer_detach(self.mnode, node):
+ raise ExecutionError("Failed to detach %s from %s"
+ % (node, self.mnode))
+ # Create a cluster
+ if not peer_probe_servers(self.mnode, self.servers):
+ raise ExecutionError("Failed to probe peer "
+ "servers %s" % self.servers)
+ g.log.info("Peer probe success for detached "
+ "servers %s", self.servers)
+
+ self.get_super_method(self, 'tearDown')()
+
+ def _get_node_identifiers(self):
+ """ Returns node address dict with ip, fqdn, hostname as keys """
+ node = {}
+ node['ip'] = gethostbyname(self.node)
+ node['fqdn'] = getfqdn(self.node)
+ node['hostname'] = g.run(self.node, "hostname")[1].strip()
+ return node
+
+ def _perform_peer_probe(self, peer):
+ """ Perfroms peer probe to a given node """
+ ret, _, err = peer_probe(self.mnode, peer)
+ self.assertEqual(ret, 0, "Failed to peer probe %s from %s. Error : %s"
+ % (peer, self.mnode, err))
+
+ def _get_new_nodes_to_peer_probe(self):
+ """ Selects a node randomly from the existing set of nodes """
+ self.node = None
+ while self.node is None:
+ candidate = gethostbyname(choice(self.servers[1:]))
+ self.node = (candidate if candidate not in self.peers_in_pool
+ else None)
+ self.peers_in_pool.append(self.node)
+
+ return self._get_node_identifiers()
+
+ def _verify_pool_list(self, node):
+ """ Verifies given nodes are there in the gluster pool list"""
+ pool_list = nodes_from_pool_list(self.mnode)
+ status = next((n for n in pool_list if n in node.values()), None)
+ self.assertIsNotNone(status, ("Node %s is not the pool list :"
+ " %s" %
+ (node[self.by_type], pool_list)))
+ g.log.info("The given node is there in the gluster pool list")
+
+ def _verify_cmd_history(self, node):
+ """Verifies cmd_history for successful entry of peer probe of nodes"""
+
+ # Extract the test specific cmds from cmd_history
+ start_msg = "Starting Test : %s : %s" % (self.id(),
+ self.glustotest_run_id)
+ end_msg = "Ending Test: %s : %s" % (self.id(), self.glustotest_run_id)
+ cmd_history_log = "/var/log/glusterfs/cmd_history.log"
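+ # The awk range pattern toggles flag 'p' at the start/end markers so
+ # that only log lines emitted during this test run are printed.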
+ cmd = "awk '/{}/ {{p=1}}; p; /{}/ {{p=0}}' {}".format(start_msg,
+ end_msg,
+ cmd_history_log)
+ ret, test_specific_cmd_history, err = g.run(self.mnode, cmd)
+ self.assertEqual(ret, 0, "Failed to extract cmd_history specific to "
+ "the current test case. Error : %s" % err)
+ # Verify the cmd is found from the extracted cmd log
+ peer_probe_cmd = "peer probe {} : SUCCESS".format(node)
+ self.assertNotEqual(test_specific_cmd_history.count(peer_probe_cmd),
+ 0, "Peer probe success entry not found"
+ " in cmd history")
+ g.log.info("The command history contains a successful entry "
+ "of peer probe to %s ", node)
+
+ def test_validate_peer_probe(self):
+ """
+ 1. Add one of the node(HOST1-IP) to the other node(HOST2-IP) and
+ form the cluster
+ # gluster peer probe <HOST-IP>
+ 2. Check the return value of the 'peer probe' command
+ 3. Confirm that the cluster is formed successfully by 'peer status'
+ command
+ # gluster peer status
+ 4. Execute 'pool list' command to get the status of the cluster
+ including the local node itself
+ # gluster pool list
+ 5. Check the 'cmd_history' for the status message related to
+ 'peer probe' command
+ 6. Repeat 1-5 for FQDN and hostnames
+ """
+
+ for self.by_type in ('ip', 'fqdn', 'hostname'):
+ # Get a node to peer probe to
+ host_node = self._get_new_nodes_to_peer_probe()
+
+ # Perform peer probe and verify the status
+ self._perform_peer_probe(host_node[self.by_type])
+
+ # Verify Peer pool list and check whether the node exists or not
+ self._verify_pool_list(host_node)
+
+ # Verify command history for successful peer probe status
+ self._verify_cmd_history(host_node[self.by_type])
+
+ g.log.info("Peer probe scenario validated using %s", self.by_type)
diff --git a/tests/functional/glusterd/test_verify_df_output.py b/tests/functional/glusterd/test_verify_df_output.py
new file mode 100644
index 000000000..4eac9193b
--- /dev/null
+++ b/tests/functional/glusterd/test_verify_df_output.py
@@ -0,0 +1,171 @@
+# Copyright (C) 2021 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import (GlusterBaseClass,
+ runs_on)
+from glustolibs.gluster.heal_libs import monitor_heal_completion
+from glustolibs.io.utils import validate_io_procs
+from glustolibs.misc.misc_libs import upload_scripts
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.volume_libs import (replace_brick_from_volume,
+ shrink_volume, expand_volume)
+from glustolibs.gluster.brick_libs import get_all_bricks
+
+
+@runs_on([['distributed-dispersed', 'distributed-replicated',
+ 'distributed-arbiter', 'dispersed', 'replicated',
+ 'arbiter'],
+ ['glusterfs']])
+class VerifyDFWithReplaceBrick(GlusterBaseClass):
+
+ @classmethod
+ def setUpClass(cls):
+ # Calling GlusterBaseClass setUpClass
+ cls.get_super_method(cls, 'setUpClass')()
+
+ # Upload io scripts for running IO on mounts
+ cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
+ "file_dir_ops.py")
+ if not upload_scripts(cls.clients, [cls.script_upload_path]):
+ raise ExecutionError("Failed to upload IO scripts to clients %s"
+ % cls.clients)
+ g.log.info("Successfully uploaded IO scripts to clients %s",
+ cls.clients)
+
+ def setUp(self):
+ # Calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+
+ # Setup Volume and Mount Volume
+ if not self.setup_volume_and_mount_volume(mounts=self.mounts):
+ raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
+ g.log.info("Successful in Setup Volume and Mount Volume")
+
+ def _perform_io_and_validate(self):
+ """ Performs IO on the mount points and validates it"""
+ all_mounts_procs, count = [], 1
+ for mount_obj in self.mounts:
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
+ "--dirname-start-num %d --dir-depth 2 "
+ "--dir-length 3 --max-num-of-dirs 3 "
+ "--num-of-files 2 %s" % (
+ self.script_upload_path, count,
+ mount_obj.mountpoint))
+ proc = g.run_async(mount_obj.client_system, cmd,
+ user=mount_obj.user)
+ all_mounts_procs.append(proc)
+ count = count + 10
+
+ # Validating IO's on mount point and waiting to complete
+ ret = validate_io_procs(all_mounts_procs, self.mounts)
+ self.assertTrue(ret, "IO failed on some of the clients")
+ g.log.info("Successfully validated IO's")
+
+ def _replace_bricks_and_wait_for_heal_completion(self):
+ """ Replaces all the bricks and waits for the heal to complete"""
+ existing_bricks = get_all_bricks(self.mnode, self.volname)
+ for brick_to_replace in existing_bricks:
+ ret = replace_brick_from_volume(self.mnode, self.volname,
+ self.servers,
+ self.all_servers_info,
+ src_brick=brick_to_replace)
+ self.assertTrue(ret,
+ "Replace of %s failed" % brick_to_replace)
+ g.log.info("Replace of brick %s successful for volume %s",
+ brick_to_replace, self.volname)
+
+ # Monitor heal completion
+ ret = monitor_heal_completion(self.mnode, self.volname)
+ self.assertTrue(ret, 'Heal has not yet completed')
+ g.log.info('Heal has completed successfully')
+
+ def _get_mount_size_from_df_h_output(self):
+ """ Extracts the mount size from the df -h output"""
+
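+ # awk prints the 'Size' column (field 2) of the matching df -h line
+ # and sed strips the trailing unit character (e.g. 'G').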
+ split_cmd = " | awk '{split($0,a,\" \");print a[2]}' | sed 's/.$//'"
+ cmd = ("cd {};df -h | grep {} {}".format(self.mounts[0].mountpoint,
+ self.volname, split_cmd))
+ ret, mount_size, _ = g.run(self.clients[0], cmd)
+ self.assertEqual(ret, 0, "Failed to extract mount size")
+ return float(mount_size.split("\n")[0])
+
+ def test_verify_df_output_when_brick_replaced(self):
+ """
+ - Take the output of df -h.
+ - Replace any one brick for the volumes.
+ - Wait till the heal is completed
+ - Repeat steps 1, 2 and 3 for all bricks for all volumes.
+ - Check if there are any inconsistencies in the output of df -h
+ - Remove bricks from volume and check output of df -h
+ - Add bricks to volume and check output of df -h
+ """
+
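+ # df -h reports the aggregate capacity of the volume at the mount
+ # point: replace-brick should leave it unchanged, while add-brick and
+ # remove-brick should grow and shrink it respectively.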
+ # Perform some IO on the mount point
+ self._perform_io_and_validate()
+
+ # Get the mount size from df -h output
+ initial_mount_size = self._get_mount_size_from_df_h_output()
+
+ # Replace all the bricks and wait till the heal completes
+ self._replace_bricks_and_wait_for_heal_completion()
+
+ # Get df -h output after brick replace
+ mount_size_after_replace = self._get_mount_size_from_df_h_output()
+
+ # Verify the mount point size remains the same after brick replace
+ self.assertEqual(initial_mount_size, mount_size_after_replace,
+ "The mount sizes before and after replace bricks "
+ "are not same")
+
+ # Add bricks
+ ret = expand_volume(self.mnode, self.volname, self.servers,
+ self.all_servers_info, force=True)
+ self.assertTrue(ret, "Failed to add-brick to volume")
+
+ # Get df -h output after volume expand
+ mount_size_after_expand = self._get_mount_size_from_df_h_output()
+
+ # Verify df -h output returns greater value
+ self.assertGreater(mount_size_after_expand, initial_mount_size,
+ "The mount size has not increased after expanding")
+
+ # Remove bricks
+ ret = shrink_volume(self.mnode, self.volname, force=True)
+ self.assertTrue(ret, ("Remove brick operation failed on "
+ "%s", self.volname))
+ g.log.info("Remove brick operation is successful on "
+ "volume %s", self.volname)
+
+ # Get df -h output after volume shrink
+ mount_size_after_shrink = self._get_mount_size_from_df_h_output()
+
+ # Verify the df -h output returns smaller value
+ self.assertGreater(mount_size_after_expand, mount_size_after_shrink,
+ "The mount size has not reduced after shrinking")
+
+ def tearDown(self):
+ """
+ Cleanup and umount volume
+ """
+ # Cleanup and umount volume
+ if not self.unmount_volume_and_cleanup_volume(mounts=self.mounts):
+ raise ExecutionError("Failed to umount the vol & cleanup Volume")
+ g.log.info("Successful in umounting the volume and Cleanup")
+
+ # Calling GlusterBaseClass teardown
+ self.get_super_method(self, 'tearDown')()
diff --git a/tests/functional/glusterd/test_volume_create.py b/tests/functional/glusterd/test_volume_create.py
index 4046b8130..fe519acf0 100644
--- a/tests/functional/glusterd/test_volume_create.py
+++ b/tests/functional/glusterd/test_volume_create.py
@@ -41,7 +41,7 @@ class TestVolumeCreate(GlusterBaseClass):
def setUpClass(cls):
# Calling GlusterBaseClass setUpClass
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
ret = cls.validate_peers_are_connected()
if not ret:
@@ -73,7 +73,7 @@ class TestVolumeCreate(GlusterBaseClass):
raise ExecutionError("Unable to delete volume % s" % volume)
g.log.info("Volume deleted successfully : %s", volume)
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_volume_create(self):
'''
diff --git a/tests/functional/glusterd/test_volume_create_with_glusterd_restarts.py b/tests/functional/glusterd/test_volume_create_with_glusterd_restarts.py
index 6256e8535..1a7fe8a1b 100644
--- a/tests/functional/glusterd/test_volume_create_with_glusterd_restarts.py
+++ b/tests/functional/glusterd/test_volume_create_with_glusterd_restarts.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -36,6 +36,7 @@ class TestVolumeCreateWithGlusterdRestarts(GlusterBaseClass):
if ret:
break
sleep(3)
+ count += 1
# clean up volumes
ret = cleanup_volume(self.mnode, self.volname)
@@ -43,7 +44,7 @@ class TestVolumeCreateWithGlusterdRestarts(GlusterBaseClass):
raise ExecutionError("Unable to delete volume % s" % self.volname)
g.log.info("Volume deleted successfully : %s", self.volname)
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_volume_create_with_glusterd_restarts(self):
# pylint: disable=too-many-statements
@@ -70,10 +71,15 @@ class TestVolumeCreateWithGlusterdRestarts(GlusterBaseClass):
server_info_for_three_nodes)
# Restarting glusterd in a loop
restart_cmd = ("for i in `seq 1 5`; do "
- "service glusterd restart; sleep 3; "
+ "service glusterd restart; "
+ "systemctl reset-failed glusterd; "
+ "sleep 3; "
"done")
proc1 = g.run_async(self.servers[3], restart_cmd)
+ # Sleep for 10 seconds to let the async glusterd restarts kick in
+ sleep(10)
+
# Creating volumes using 3 servers
ret, _, _ = volume_create(self.mnode, self.volname,
bricks_list)
@@ -90,15 +96,21 @@ class TestVolumeCreateWithGlusterdRestarts(GlusterBaseClass):
if ret:
break
sleep(3)
+ count += 1
self.assertTrue(ret, "Peers are not in connected state.")
g.log.info("Peers are in connected state.")
# Restarting glusterd in a loop
restart_cmd = ("for i in `seq 1 5`; do "
- "service glusterd restart; sleep 3; "
+ "service glusterd restart; "
+ "systemctl reset-failed glusted; "
+ "sleep 3; "
"done")
proc1 = g.run_async(self.servers[3], restart_cmd)
+ # Sleep for 10 seconds to let the async glusterd restarts kick in
+ sleep(10)
+
# Start the volume created.
ret, _, _ = volume_start(self.mnode, self.volname)
self.assertEqual(ret, 0, "Volume start failed")
@@ -114,5 +126,6 @@ class TestVolumeCreateWithGlusterdRestarts(GlusterBaseClass):
if ret:
break
sleep(3)
+ count += 1
self.assertTrue(ret, "Peers are not in connected state.")
g.log.info("Peers are in connected state.")
diff --git a/tests/functional/glusterd/test_volume_delete.py b/tests/functional/glusterd/test_volume_delete.py
index e207bb4b0..6f885f9a8 100644
--- a/tests/functional/glusterd/test_volume_delete.py
+++ b/tests/functional/glusterd/test_volume_delete.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -21,10 +21,11 @@ from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.volume_libs import (cleanup_volume, get_volume_list,
setup_volume)
-from glustolibs.gluster.volume_ops import (volume_stop)
+from glustolibs.gluster.volume_ops import (volume_stop, volume_start)
from glustolibs.gluster.brick_libs import get_all_bricks
from glustolibs.gluster.gluster_init import stop_glusterd, start_glusterd
-from glustolibs.gluster.peer_ops import peer_probe_servers, is_peer_connected
+from glustolibs.gluster.peer_ops import (
+ peer_probe_servers, wait_for_peers_to_connect)
@runs_on([['distributed', 'replicated', 'distributed-replicated', 'dispersed',
@@ -35,7 +36,7 @@ class TestVolumeDelete(GlusterBaseClass):
def setUpClass(cls):
# Calling GlusterBaseClass setUpClass
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# check whether peers are in connected state
ret = cls.validate_peers_are_connected()
@@ -44,13 +45,17 @@ class TestVolumeDelete(GlusterBaseClass):
def tearDown(self):
+ # start the volume, it should succeed
+ ret, _, _ = volume_start(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "Volume stop failed")
+
# start glusterd on all servers
ret = start_glusterd(self.servers)
if not ret:
raise ExecutionError("Failed to start glusterd on all servers")
for server in self.servers:
- ret = is_peer_connected(server, self.servers)
+ ret = wait_for_peers_to_connect(server, self.servers)
if not ret:
ret = peer_probe_servers(server, self.servers)
if not ret:
@@ -68,13 +73,13 @@ class TestVolumeDelete(GlusterBaseClass):
raise ExecutionError("Unable to delete volume % s" % volume)
g.log.info("Volume deleted successfully : %s", volume)
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
@classmethod
def tearDownClass(cls):
# Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDownClass.im_func(cls)
+ cls.get_super_method(cls, 'tearDownClass')()
def test_vol_delete_when_one_of_nodes_is_down(self):
diff --git a/tests/functional/glusterd/test_volume_get.py b/tests/functional/glusterd/test_volume_get.py
index bd4dd86ec..d38380f60 100644
--- a/tests/functional/glusterd/test_volume_get.py
+++ b/tests/functional/glusterd/test_volume_get.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -24,6 +24,7 @@ from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.volume_ops import (get_volume_options,
set_volume_options)
from glustolibs.gluster.lib_utils import is_core_file_created
+from glustolibs.gluster.gluster_init import get_gluster_version
@runs_on([['distributed', 'replicated', 'distributed-replicated',
@@ -34,7 +35,7 @@ class TestVolumeGet(GlusterBaseClass):
setUp method for every test
"""
# calling GlusterBaseClass setUp
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
# Creating Volume
g.log.info("Started creating volume")
@@ -52,7 +53,7 @@ class TestVolumeGet(GlusterBaseClass):
raise ExecutionError("Failed Cleanup the Volume %s" % self.volname)
# Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_volume_get(self):
"""
@@ -175,8 +176,12 @@ class TestVolumeGet(GlusterBaseClass):
ret = get_volume_options(self.mnode, self.volname, "io-cache")
self.assertIsNotNone(ret, "gluster volume get %s io-cache command "
"failed" % self.volname)
- self.assertIn("on", ret['performance.io-cache'], "io-cache value "
- "is not correct")
+ if get_gluster_version(self.mnode) >= 6.0:
+ self.assertIn("off", ret['performance.io-cache'],
+ "io-cache value is not correct")
+ else:
+ self.assertIn("on", ret['performance.io-cache'],
+ "io-cache value is not correct")
g.log.info("io-cache value is correct")
# Performing gluster volume set volname performance.low-prio-threads
diff --git a/tests/functional/glusterd/test_volume_network_ping_timeout.py b/tests/functional/glusterd/test_volume_network_ping_timeout.py
index 7d72d8ab2..eef9604bc 100644
--- a/tests/functional/glusterd/test_volume_network_ping_timeout.py
+++ b/tests/functional/glusterd/test_volume_network_ping_timeout.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -19,7 +19,9 @@
# of the volume.
import re
+
from glusto.core import Glusto as g
+
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.misc.misc_libs import upload_scripts
@@ -36,17 +38,15 @@ from glustolibs.io.utils import collect_mounts_arequal
class CheckVolumeChecksumAfterChangingNetworkPingTimeOut(GlusterBaseClass):
@classmethod
def setUpClass(cls):
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
g.log.info("Starting %s ", cls.__name__)
# Uploading file_dir script in all client direcotries
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts to clients %s"
% cls.clients)
@@ -58,7 +58,7 @@ class CheckVolumeChecksumAfterChangingNetworkPingTimeOut(GlusterBaseClass):
setUp method for every test
"""
# calling GlusterBaseClass setUp
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
# Creating Volume
g.log.info("Started creating volume")
@@ -80,7 +80,7 @@ class CheckVolumeChecksumAfterChangingNetworkPingTimeOut(GlusterBaseClass):
raise ExecutionError("Failed Cleanup the Volume %s" % self.volname)
# Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_volume_checksum_after_changing_network_ping_timeout(self):
@@ -112,8 +112,10 @@ class CheckVolumeChecksumAfterChangingNetworkPingTimeOut(GlusterBaseClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
- cmd = ("python %s create_files -f 10 --base-file-name newfile %s"
- % (self.script_upload_path, mount_obj.mountpoint))
+ cmd = ("/usr/bin/env python %s create_files -f 10 "
+ "--base-file-name newfile %s" % (
+ self.script_upload_path,
+ mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
self.all_mounts_procs.append(proc)
diff --git a/tests/functional/glusterd/test_volume_operations.py b/tests/functional/glusterd/test_volume_operations.py
index 47807c6f9..23bcfe640 100644
--- a/tests/functional/glusterd/test_volume_operations.py
+++ b/tests/functional/glusterd/test_volume_operations.py
@@ -35,7 +35,7 @@ from glustolibs.gluster.exceptions import ExecutionError
class TestVolumeCreate(GlusterBaseClass):
def setUp(self):
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
# check whether peers are in connected state
ret = self.validate_peers_are_connected()
if not ret:
@@ -54,7 +54,7 @@ class TestVolumeCreate(GlusterBaseClass):
raise ExecutionError("Unable to delete volume %s" % volume)
g.log.info("Volume deleted successfully : %s", volume)
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_volume_start_force(self):
diff --git a/tests/functional/glusterd/test_volume_reduce_replica.py b/tests/functional/glusterd/test_volume_reduce_replica.py
index 82a9a60cf..975651224 100644
--- a/tests/functional/glusterd/test_volume_reduce_replica.py
+++ b/tests/functional/glusterd/test_volume_reduce_replica.py
@@ -29,7 +29,7 @@ class TestVolumeReduceReplicaCount(GlusterBaseClass):
@classmethod
def setUpClass(cls):
# Calling GlusterBaseClass setUpClass
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# Override Volumes
cls.volume['voltype'] = {
@@ -60,7 +60,7 @@ class TestVolumeReduceReplicaCount(GlusterBaseClass):
raise ExecutionError("Failed to delete the brick "
"dir's of deleted volume")
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_volume_reduce_replica_count(self):
"""
diff --git a/tests/functional/glusterd/test_volume_reset.py b/tests/functional/glusterd/test_volume_reset.py
index ca97ce588..078722e56 100644
--- a/tests/functional/glusterd/test_volume_reset.py
+++ b/tests/functional/glusterd/test_volume_reset.py
@@ -35,7 +35,7 @@ class GlusterdVolumeReset(GlusterBaseClass):
'''
@classmethod
def setUpClass(cls):
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
g.log.info("Starting %s ", cls.__name__)
# Creating Volume
@@ -51,7 +51,7 @@ class GlusterdVolumeReset(GlusterBaseClass):
setUp method for every test
"""
# calling GlusterBaseClass setUp
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
# command for volume reset
g.log.info("started resetting volume")
@@ -67,7 +67,7 @@ class GlusterdVolumeReset(GlusterBaseClass):
tearDown for every test
"""
# Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
@classmethod
def tearDownClass(cls):
diff --git a/tests/functional/glusterd/test_volume_set_when_glusterd_stopped_on_one_node.py b/tests/functional/glusterd/test_volume_set_when_glusterd_stopped_on_one_node.py
new file mode 100644
index 000000000..d99fa185f
--- /dev/null
+++ b/tests/functional/glusterd/test_volume_set_when_glusterd_stopped_on_one_node.py
@@ -0,0 +1,193 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+""" Description:
+ Volume set operation when glusterd is stopped on one node
+"""
+
+from random import choice
+from time import sleep
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.volume_ops import (
+ set_volume_options, get_volume_info)
+from glustolibs.gluster.brick_libs import get_online_bricks_list
+from glustolibs.gluster.gluster_init import (
+ start_glusterd, stop_glusterd, wait_for_glusterd_to_start)
+from glustolibs.misc.misc_libs import upload_scripts
+from glustolibs.io.utils import validate_io_procs
+
+
+@runs_on([['distributed', 'replicated', 'distributed-replicated',
+ 'dispersed', 'distributed-dispersed'], ['glusterfs']])
+class TestVolumeSetWhenGlusterdStoppedOnOneNode(GlusterBaseClass):
+
+ @classmethod
+ def setUpClass(cls):
+ cls.get_super_method(cls, 'setUpClass')()
+
+ # Uploading file_dir script in all client directories
+ cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
+ "file_dir_ops.py")
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
+ if not ret:
+ raise ExecutionError("Failed to upload IO scripts to clients %s"
+ % cls.clients)
+ g.log.info("Successfully uploaded IO scripts to clients %s",
+ cls.clients)
+
+ def setUp(self):
+ self.get_super_method(self, 'setUp')()
+ # Creating Volume and mounting volume.
+ ret = self.setup_volume_and_mount_volume(self.mounts)
+ if not ret:
+ raise ExecutionError("Volume creation or mount failed: %s"
+ % self.volname)
+ g.log.info("Volme created and mounted successfully : %s",
+ self.volname)
+
+ def tearDown(self):
+ # Check if a node is still down
+ if self.glusterd_is_stopped:
+ ret = start_glusterd(self.random_server)
+ self.assertTrue(ret, "Failed to start glusterd on %s"
+ % self.random_server)
+ g.log.info("Successfully started glusterd on node: %s",
+ self.random_server)
+
+ # Waiting for glusterd to start completely
+ ret = wait_for_glusterd_to_start(self.random_server)
+ self.assertTrue(ret, "glusterd is not running on %s"
+ % self.random_server)
+ g.log.info("glusterd is started and running on %s",
+ self.random_server)
+
+ # Unmounting and cleaning volume.
+ ret = self.unmount_volume_and_cleanup_volume(self.mounts)
+ if not ret:
+ raise ExecutionError("Unable to delete volume % s" % self.volname)
+ g.log.info("Volume deleted successfully : %s", self.volname)
+
+ self.get_super_method(self, 'tearDown')()
+
+ def test_volume_set_when_glusterd_stopped_on_one_node(self):
+ """
+ Test Case:
+ 1) Setup and mount a volume on client.
+ 2) Stop glusterd on a random server.
+ 3) Start IO on mount points
+ 4) Set an option on the volume
+ 5) Start glusterd on the stopped node.
+ 6) Verify all the bricks are online after starting glusterd.
+ 7) Check if the volume info is synced across the cluster.
+ """
+ # Fetching the bricks list and storing it for later use
+ list1 = get_online_bricks_list(self.mnode, self.volname)
+ self.assertIsNotNone(list1, "Failed to get the list of online bricks "
+ "for volume: %s" % self.volname)
+
+ # Fetching a random server from list.
+ self.random_server = choice(self.servers[1:])
+
+ # Stopping glusterd on one node.
+ ret = stop_glusterd(self.random_server)
+ self.assertTrue(ret, "Failed to stop glusterd on one node.")
+ g.log.info("Successfully stopped glusterd on one node.")
+
+ self.glusterd_is_stopped = True
+
+ # Start IO on mount points.
+ self.all_mounts_procs = []
+ counter = 1
+ for mount_obj in self.mounts:
+ g.log.info("Starting IO on %s:%s", mount_obj.client_system,
+ mount_obj.mountpoint)
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
+ "--dir-depth 4 "
+ "--dir-length 6 "
+ "--dirname-start-num %d "
+ "--max-num-of-dirs 3 "
+ "--num-of-files 5 %s" % (
+ self.script_upload_path,
+ counter, mount_obj.mountpoint))
+ proc = g.run_async(mount_obj.client_system, cmd,
+ user=mount_obj.user)
+ self.all_mounts_procs.append(proc)
+ counter += 1
+
+ # Validate IO
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
+ g.log.info("IO validation complete.")
+
+ # Set an option on the volume: stat-prefetch on
+ self.options = {"stat-prefetch": "on"}
+ ret = set_volume_options(self.mnode, self.volname, self.options)
+ self.assertTrue(ret, ("Failed to set option stat-prefetch to on"
+ " for the volume %s" % self.volname))
+ g.log.info("Succeeded in setting stat-prefetch option to on"
+ " for the volume %s", self.volname)
+
+ # start glusterd on the node where glusterd is stopped
+ ret = start_glusterd(self.random_server)
+ self.assertTrue(ret, "Failed to start glusterd on %s"
+ % self.random_server)
+ g.log.info("Successfully started glusterd on node: %s",
+ self.random_server)
+
+ # Waiting for glusterd to start completely
+ ret = wait_for_glusterd_to_start(self.random_server)
+ self.assertTrue(ret, "glusterd is not running on %s"
+ % self.random_server)
+ g.log.info("glusterd is started and running on %s", self.random_server)
+
+ self.glusterd_is_stopped = False
+
+ # Confirm if all the bricks are online or not
+ count = 0
+ while count < 10:
+ list2 = get_online_bricks_list(self.mnode, self.volname)
+ if list1 == list2:
+ break
+ sleep(2)
+ count += 1
+
+ self.assertListEqual(list1, list2, "Unexpected: All the bricks in the"
+ "volume are not online")
+ g.log.info("All the bricks in the volume are back online")
+
+ # volume info should be synced across the cluster
+ out1 = get_volume_info(self.mnode, self.volname)
+ self.assertIsNotNone(out1, "Failed to get the volume info from %s"
+ % self.mnode)
+ g.log.info("Getting volume info from %s is success", self.mnode)
+
+ count = 0
+ while count < 60:
+ out2 = get_volume_info(self.random_server, self.volname)
+ self.assertIsNotNone(out2, "Failed to get the volume info from %s"
+ % self.random_server)
+ if out1 == out2:
+ break
+ sleep(2)
+ count += 1
+
+ self.assertDictEqual(out1, out2, "Volume info is not synced in the"
+ "restarted node")
+ g.log.info("Volume info is successfully synced across the cluster")
diff --git a/tests/functional/glusterd/test_volume_set_with_quorum_enabled.py b/tests/functional/glusterd/test_volume_set_with_quorum_enabled.py
index 24865d245..41719da63 100644
--- a/tests/functional/glusterd/test_volume_set_with_quorum_enabled.py
+++ b/tests/functional/glusterd/test_volume_set_with_quorum_enabled.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -18,6 +18,7 @@ from time import sleep
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.peer_ops import wait_for_peers_to_connect
from glustolibs.gluster.volume_ops import set_volume_options
from glustolibs.gluster.gluster_init import start_glusterd, stop_glusterd
@@ -29,7 +30,7 @@ class TestVolumeSetOpWithQuorum(GlusterBaseClass):
def setUp(self):
# calling GlusterBaseClass setUp
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
# Creating Volume
g.log.info("Started creating volume")
@@ -50,26 +51,11 @@ class TestVolumeSetOpWithQuorum(GlusterBaseClass):
g.log.info("Successfully started glusterd.")
# Checking if peer is connected.
- counter = 0
- while counter < 30:
- ret = self.validate_peers_are_connected()
- counter += 1
- if ret:
- break
- sleep(3)
- if not ret:
- raise ExecutionError("Peer is not in connected state.")
+ ret = wait_for_peers_to_connect(self.mnode, self.servers)
+ self.assertTrue(ret, "glusterd is not connected %s with peer %s"
+ % (self.mnode, self.servers))
g.log.info("Peers is in connected state.")
- # Setting Quorum ratio to 51%
- self.quorum_perecent = {'cluster.server-quorum-ratio': '51%'}
- ret = set_volume_options(self.mnode, 'all', self.quorum_perecent)
- if not ret:
- raise ExecutionError("gluster volume set all cluster.server-quorum"
- "-ratio percentage Failed :%s" % self.servers)
- g.log.info("gluster volume set all cluster.server-quorum-ratio 51 "
- "percentage enabled successfully on :%s", self.servers)
-
# stopping the volume and Cleaning up the volume
ret = self.cleanup_volume()
if not ret:
@@ -77,7 +63,7 @@ class TestVolumeSetOpWithQuorum(GlusterBaseClass):
g.log.info("Volume deleted successfully : %s", self.volname)
# Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_volume_set_wit_quorum_enabled(self):
# pylint: disable=too-many-statements
diff --git a/tests/functional/glusterd/test_volume_status.py b/tests/functional/glusterd/test_volume_status.py
index 109586f35..01f874558 100644
--- a/tests/functional/glusterd/test_volume_status.py
+++ b/tests/functional/glusterd/test_volume_status.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -20,7 +20,9 @@ IOs in progress
"""
import random
from time import sleep
+
from glusto.core import Glusto as g
+
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.misc.misc_libs import upload_scripts
@@ -34,7 +36,7 @@ class VolumeStatusWhenIOInProgress(GlusterBaseClass):
@classmethod
def setUpClass(cls):
cls.counter = 1
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# checking for peer status from every node
ret = cls.validate_peers_are_connected()
@@ -47,11 +49,9 @@ class VolumeStatusWhenIOInProgress(GlusterBaseClass):
# Uploading file_dir script in all client direcotries
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts to clients %s" %
cls.clients)
@@ -63,7 +63,7 @@ class VolumeStatusWhenIOInProgress(GlusterBaseClass):
setUp method for every test
"""
# calling GlusterBaseClass setUp
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
# Creating Volume
g.log.info("Started creating volume")
@@ -100,7 +100,7 @@ class VolumeStatusWhenIOInProgress(GlusterBaseClass):
raise ExecutionError("Failed Cleanup the Volume %s" % self.volname)
# Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_volume_status_inode_while_io_in_progress(self):
'''
@@ -129,7 +129,7 @@ class VolumeStatusWhenIOInProgress(GlusterBaseClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 2 "
"--dir-length 15 "
diff --git a/tests/functional/glusterd/test_volume_status_fd.py b/tests/functional/glusterd/test_volume_status_fd.py
index 415c96de7..0c60dd802 100644
--- a/tests/functional/glusterd/test_volume_status_fd.py
+++ b/tests/functional/glusterd/test_volume_status_fd.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2018-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -20,7 +20,9 @@
"""
import random
+
from glusto.core import Glusto as g
+
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.misc.misc_libs import upload_scripts
@@ -34,16 +36,14 @@ class VolumeStatusFdWhenIOInProgress(GlusterBaseClass):
@classmethod
def setUpClass(cls):
cls.counter = 1
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# Uploading file_dir script in all client direcotries
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts to clients %s"
% cls.clients)
@@ -55,7 +55,7 @@ class VolumeStatusFdWhenIOInProgress(GlusterBaseClass):
setUp method for every test
"""
# calling GlusterBaseClass setUp
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
# Creating Volume
ret = self.setup_volume_and_mount_volume(self.mounts)
@@ -85,7 +85,7 @@ class VolumeStatusFdWhenIOInProgress(GlusterBaseClass):
g.log.info("Volume deleted successfully : %s", self.volname)
# Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_volume_status_fd(self):
@@ -112,7 +112,7 @@ class VolumeStatusFdWhenIOInProgress(GlusterBaseClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 2 "
"--dir-length 10 "
diff --git a/tests/functional/glusterd/test_volume_status_show_bricks_online_though_brickpath_deleted.py b/tests/functional/glusterd/test_volume_status_show_bricks_online_though_brickpath_deleted.py
new file mode 100644
index 000000000..05bb47c40
--- /dev/null
+++ b/tests/functional/glusterd/test_volume_status_show_bricks_online_though_brickpath_deleted.py
@@ -0,0 +1,138 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+""" Description:
+ Volume status when one of the brickpath is not available.
+"""
+
+import random
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.brick_libs import (are_bricks_online, get_all_bricks,
+ bring_bricks_online,
+ bring_bricks_offline,
+ are_bricks_offline)
+from glustolibs.gluster.volume_ops import (volume_start)
+
+
+@runs_on([['distributed', 'replicated', 'distributed-replicated',
+ 'dispersed', 'distributed-dispersed', 'arbiter',
+ 'distributed-arbiter'], ['glusterfs']])
+class TestVolumeStatusShowBrickOnlineThoughBrickpathDeleted(GlusterBaseClass):
+ def setUp(self):
+ # calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+
+ # Ensure tearDown's remount check is always defined
+ self.check_for_remount = False
+ ret = self.setup_volume()
+ if not ret:
+ raise ExecutionError("Volume creation failed: %s"
+ % self.volname)
+ g.log.info("Volume created successfully : %s", self.volname)
+
+ def tearDown(self):
+ # Stopping the volume and Cleaning up the volume
+ if self.check_for_remount:
+ ret, _, _ = g.run(self.brick_node, 'mount %s' % self.node_brick)
+ if ret:
+ raise ExecutionError('Failed to remount brick %s'
+ % self.node_brick)
+ g.log.info('Successfully remounted %s with read-write option',
+ self.node_brick)
+ ret = self.cleanup_volume()
+ if not ret:
+ raise ExecutionError("Failed to cleanup the volume %s"
+ % self.volname)
+ g.log.info("Volume deleted successfully: %s", self.volname)
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def test_volume_status_show_brick_online_though_brickpath_deleted(self):
+ """
+ Test Case:
+ 1) Create a volume and start it.
+ 2) Fetch the brick list
+ 3) Bring any one brick down umount the brick
+ 4) Force start the volume and check that all the bricks are not online
+ 5) Remount the removed brick and bring back the brick online
+ 6) Force start the volume and check if all the bricks are online
+ """
+ # Fetching the brick list
+ brick_list = get_all_bricks(self.mnode, self.volname)
+ self.assertIsNotNone(brick_list, "Failed to get the bricks in"
+ " the volume")
+
+ # Bringing one brick down
+ random_brick = random.choice(brick_list)
+ ret = bring_bricks_offline(self.volname, random_brick)
+ self.assertTrue(ret, "Failed to bring offline")
+
+ # Creating a list of bricks to be removed
+ remove_bricks_list = []
+ remove_bricks_list.append(random_brick)
+
+ # Checking if the brick is offline or not
+ ret = are_bricks_offline(self.mnode, self.volname,
+ remove_bricks_list)
+ self.assertTrue(ret, 'Bricks %s are not offline'
+ % random_brick)
+ g.log.info('Brick %s is offline as expected', random_brick)
+
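+ # Illustrative path handling: for a brick entry like
+ # 'server1:/bricks/brick1/vol_brick0', split(':') yields the node and
+ # the brick path, and joining the first three path components gives
+ # the underlying mount point '/bricks/brick1'.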
+ # umounting the brick which was made offline
+ self.brick_node, volume_brick = random_brick.split(':')
+ self.node_brick = '/'.join(volume_brick.split('/')[0:3])
+ g.log.info('Start umount brick %s...', self.node_brick)
+ ret, _, _ = g.run(self.brick_node, 'umount %s' % self.node_brick)
+ self.assertFalse(ret, 'Failed to umount brick %s' % self.node_brick)
+ g.log.info('Successfully umounted brick %s', self.node_brick)
+
+ self.check_for_remount = True
+
+ # Force starting the volume
+ ret, _, _ = volume_start(self.mnode, self.volname, True)
+ self.assertEqual(ret, 0, "Faile to force start volume")
+ g.log.info("Successfully force start volume")
+
+ # remounting the offline brick
+ g.log.info('Start remount brick %s with read-write option...',
+ self.node_brick)
+ ret, _, _ = g.run(self.brick_node, 'mount %s' % self.node_brick)
+ self.assertFalse(ret, 'Failed to remount brick %s' % self.node_brick)
+ g.log.info('Successfully remounted %s with read-write option',
+ self.node_brick)
+
+ self.check_for_remount = False
+
+ # Checking that not all bricks are online
+ ret = are_bricks_online(self.mnode, self.volname, brick_list)
+ self.assertFalse(ret, "Unexpected: All the bricks are online")
+ g.log.info("Expected: Not all bricks are online")
+
+ # Bringing back the offline brick online
+ ret = bring_bricks_online(self.mnode, self.volname, remove_bricks_list)
+ self.assertTrue(ret, "Failed to bring bricks online")
+ g.log.info("Successfully brought bricks online")
+
+ # Force starting the volume
+ ret, _, _ = volume_start(self.mnode, self.volname, True)
+ self.assertEqual(ret, 0, "Faile to force start volume")
+ g.log.info("Successfully force start volume")
+
+ # Checking if all the bricks are online or not
+ ret = are_bricks_online(self.mnode, self.volname, brick_list)
+ self.assertTrue(ret, "Unexpected: All the bricks are not online")
+ g.log.info("Expected: All the bricks are online")
diff --git a/tests/functional/glusterd/test_volume_status_with_absent_bricks.py b/tests/functional/glusterd/test_volume_status_with_absent_bricks.py
index 9a35c2fef..5aed2af3b 100644
--- a/tests/functional/glusterd/test_volume_status_with_absent_bricks.py
+++ b/tests/functional/glusterd/test_volume_status_with_absent_bricks.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2018-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -22,63 +22,49 @@ import random
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
-from glustolibs.gluster.volume_ops import (volume_create, volume_start,
- volume_status)
-from glustolibs.gluster.lib_utils import form_bricks_list
+from glustolibs.gluster.volume_ops import (volume_start, volume_status)
+from glustolibs.gluster.brick_libs import get_all_bricks
+from glustolibs.gluster.volume_libs import cleanup_volume
-@runs_on([['distributed', 'replicated', 'distributed-replicated'],
- ['glusterfs']])
+@runs_on([['distributed', 'replicated', 'distributed-replicated',
+ 'dispersed', 'distributed-dispersed', 'arbiter',
+ 'distributed-arbiter'], ['glusterfs']])
class TestVolumeStatusWithAbsentBricks(GlusterBaseClass):
+ def setUp(self):
+ # Calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+
+ # Creating Volume
+ g.log.info("Started creating volume")
+ ret = self.setup_volume(False, True)
+ if ret:
+ g.log.info("Volme created successfully : %s", self.volname)
+ else:
+ raise ExecutionError("Volume creation failed: %s" % self.volname)
def tearDown(self):
- """
- tearDown for every test
- """
- # stopping the volume and Cleaning up the volume
- ret = self.cleanup_volume()
+ # Stopping the volume and Cleaning up the volume
+ ret = cleanup_volume(self.mnode, self.volname)
if not ret:
- raise ExecutionError("Failed Cleanup the Volume %s"
- % self.volname)
+ raise ExecutionError("Failed to cleanup volume")
+ g.log.info("Volume deleted successfully : %s", self.volname)
+
# Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_volume_absent_bricks(self):
- '''
- -> Create Volume
- -> Remove any one Brick directory
- -> Start Volume
- -> Check the gluster volume status
- '''
- num_of_bricks = 0
- replica = True
-
- if self.volume_type == 'distributed':
- num_of_bricks = 3
- replica = False
-
- elif self.volume_type == 'replicated':
- num_of_bricks = 3
-
- elif self.volume_type == 'distributed-replicated':
- num_of_bricks = 6
-
- # Forming brick list
- brick_list = form_bricks_list(self.mnode, self.volname, num_of_bricks,
- self.servers, self.all_servers_info)
- if replica:
- # Creating Volume
- ret, _, _ = volume_create(self.mnode, self.volname, brick_list,
- replica_count=3)
- self.assertEqual(ret, 0, "Volume creation failed for %s"
- % self.volname)
- g.log.info("volume created successfully %s", self.volname)
- else:
- # Creating Volume
- ret, _, _ = volume_create(self.mnode, self.volname, brick_list)
- self.assertEqual(ret, 0, "Volume creation failed for %s"
- % self.volname)
- g.log.info("volume created successfully %s", self.volname)
+ """
+ Test Case:
+ 1) Create Volume
+ 2) Remove any one Brick directory
+ 3) Start Volume and compare the failure message
+ 4) Check the gluster volume status and compare the status message
+ """
+ # Fetching the brick list
+ brick_list = get_all_bricks(self.mnode, self.volname)
+ self.assertIsNotNone(brick_list, "Failed to get the bricks in"
+ " the volume")
# Command for removing brick directory
random_brick = random.choice(brick_list)
diff --git a/tests/functional/glusterd/test_volume_status_xml.py b/tests/functional/glusterd/test_volume_status_xml.py
index 1f5475374..568d6906d 100644
--- a/tests/functional/glusterd/test_volume_status_xml.py
+++ b/tests/functional/glusterd/test_volume_status_xml.py
@@ -29,8 +29,7 @@ from glustolibs.gluster.peer_ops import (peer_probe_servers, peer_detach,
class TestVolumeStatusxml(GlusterBaseClass):
def setUp(self):
-
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
# check whether peers are in connected state
ret = self.validate_peers_are_connected()
@@ -60,7 +59,23 @@ class TestVolumeStatusxml(GlusterBaseClass):
if not ret:
raise ExecutionError("Failed to probe detached "
"servers %s" % self.servers)
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
+
+ def _get_test_specific_glusterd_log(self, node):
+ """Gets the test specific glusterd log"""
+ # Extract this test's entries from the glusterd log using the
+ # start and end markers logged for the test run
+ start_msg = "Starting Test : %s : %s" % (self.id(),
+ self.glustotest_run_id)
+ end_msg = "Ending Test: %s : %s" % (self.id(),
+ self.glustotest_run_id)
+ glusterd_log = "/var/log/glusterfs/glusterd.log"
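+ # awk range trick: set flag p at the start marker, print lines
+ # while p is set, and clear p at the end marker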
+ cmd = ("awk '/{}/ {{p=1}}; p; /{}/ {{p=0}}' {}"
+ .format(start_msg, end_msg, glusterd_log))
+ ret, test_specific_glusterd_log, err = g.run(node, cmd)
+ self.assertEqual(ret, 0, "Failed to extract glusterd log specific"
+ " to the current test case. "
+ "Error : %s" % err)
+ return test_specific_glusterd_log
def test_volume_status_xml(self):
@@ -110,3 +125,14 @@ class TestVolumeStatusxml(GlusterBaseClass):
self.assertIsNotNone(vol_status, ("Failed to get volume "
"status --xml for %s"
% self.volname))
+
+ # Verify there are no crashes while executing gluster volume status
+ status = True
+ glusterd_log = (self._get_test_specific_glusterd_log(self.mnode)
+ .split("\n"))
+ for line in glusterd_log:
+ if ' E ' in line:
+ status = False
+ g.log.error("Unexpected! Error found: %s", line)
+
+ self.assertTrue(status, "Error found in glusterd logs")
diff --git a/tests/functional/glusterd/test_xml_dump_of_gluster_volume_status_during_rebalance.py b/tests/functional/glusterd/test_xml_dump_of_gluster_volume_status_during_rebalance.py
new file mode 100644
index 000000000..5712dcf32
--- /dev/null
+++ b/tests/functional/glusterd/test_xml_dump_of_gluster_volume_status_during_rebalance.py
@@ -0,0 +1,185 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.gluster_init import (
+ stop_glusterd, start_glusterd,
+ is_glusterd_running
+)
+from glustolibs.gluster.lib_utils import form_bricks_list
+from glustolibs.gluster.peer_ops import wait_for_peers_to_connect
+from glustolibs.gluster.rebalance_ops import (
+ get_rebalance_status,
+ rebalance_start
+)
+from glustolibs.gluster.volume_libs import (
+ cleanup_volume
+)
+from glustolibs.gluster.volume_ops import (
+ volume_stop, volume_create, volume_start, get_volume_status
+)
+from glustolibs.io.utils import (
+ list_all_files_and_dirs_mounts,
+ wait_for_io_to_complete
+)
+from glustolibs.misc.misc_libs import upload_scripts
+
+
+@runs_on([['distributed-replicated'], ['glusterfs']])
+class XmlDumpGlusterVolumeStatus(GlusterBaseClass):
+ """
+ xml Dump of gluster volume status during rebalance, when one gluster
+ node is down
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ # Calling GlusterBaseClass setUpClass
+ cls.get_super_method(cls, 'setUpClass')()
+
+ # Setup Volume and Mount Volume
+ ret = cls.setup_volume_and_mount_volume(mounts=cls.mounts)
+ if not ret:
+ raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
+
+ cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
+ "file_dir_ops.py")
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
+ if not ret:
+ raise ExecutionError("Failed to upload IO scripts to clients %s" %
+ cls.clients)
+ g.log.info("Successfully uploaded IO scripts to clients %s",
+ cls.clients)
+
+ # Start IO on mounts
+ cls.all_mounts_procs = []
+ for index, mount_obj in enumerate(cls.mounts, start=1):
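+ # create_deep_dirs_with_files builds a directory tree with files;
+ # offsetting --dirname-start-num per mount keeps directory names
+ # unique across mounts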
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
+ "--dirname-start-num %d "
+ "--dir-depth 1 "
+ "--dir-length 5 "
+ "--max-num-of-dirs 10 "
+ "--num-of-files 60 %s" % (
+ cls.script_upload_path,
+ index + 10, mount_obj.mountpoint))
+ proc = g.run_async(mount_obj.client_system, cmd,
+ user=mount_obj.user)
+ cls.all_mounts_procs.append(proc)
+ cls.io_validation_complete = False
+
+ # Wait for IO to complete
+ g.log.info("Wait for IO to complete")
+ ret = wait_for_io_to_complete(cls.all_mounts_procs, cls.mounts)
+ if not ret:
+ raise ExecutionError("IO failed on some of the clients")
+ cls.io_validation_complete = True
+
+ ret = list_all_files_and_dirs_mounts(cls.mounts)
+ if not ret:
+ raise ExecutionError("Failed to list all files and dirs")
+
+ def test_xml_dump_of_gluster_volume_status_during_rebalance(self):
+ """
+ 1. Create a trusted storage pool by peer probing the nodes
+ 2. Create a distributed-replicated volume
+ 3. Start the volume and fuse mount the volume and start IO
+ 4. Create another replicated volume and start it and stop it
+ 5. Start rebalance on the volume
+ 6. While rebalance is in progress, stop glusterd on one of the nodes
+ in the Trusted Storage pool.
+ 7. Get the status of the volumes with --xml dump
+ """
+ self.volname_2 = "test_volume_2"
+
+ # Fetching all the parameters needed to create the second volume
+ # on the first three servers
+ list_of_three_servers = self.servers[:3]
+ server_info_for_three_nodes = {
+ server: self.all_servers_info[server]
+ for server in list_of_three_servers}
+
+ # Form bricks for the second volume (named after volname_2 so the
+ # brick directories do not clash with the first volume's bricks)
+ bricks_list = form_bricks_list(self.mnode, self.volname_2,
+ 3, list_of_three_servers,
+ server_info_for_three_nodes)
+ # Creating the second volume using 3 servers
+ ret, _, _ = volume_create(self.mnode, self.volname_2,
+ bricks_list, force=True)
+ self.assertFalse(ret, "Volume creation failed")
+ g.log.info("Volume %s created successfully", self.volname_2)
+ ret, _, _ = volume_start(self.mnode, self.volname_2)
+ self.assertFalse(
+ ret, "Failed to start volume {}".format(self.volname_2))
+ ret, _, _ = volume_stop(self.mnode, self.volname_2)
+ self.assertFalse(
+ ret, "Failed to stop volume {}".format(self.volname_2))
+
+ # Start Rebalance
+ ret, _, _ = rebalance_start(self.mnode, self.volname)
+ self.assertEqual(ret, 0, ("Failed to start rebalance on the volume "
+ "%s", self.volname))
+
+ # Get rebalance status
+ status_info = get_rebalance_status(self.mnode, self.volname)
+ self.assertIsNotNone(status_info, "Failed to get rebalance status")
+ status = status_info['aggregate']['statusStr']
+
+ self.assertIn('in progress', status,
+ "Rebalance process is not running")
+ g.log.info("Rebalance process is running")
+
+ # Stop glusterd
+ ret = stop_glusterd(self.servers[2])
+ self.assertTrue(ret, "Failed to stop glusterd")
+
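+ # glusterd is down on one node, but 'gluster v status' from mnode
+ # should still work; grab the rebalance status string (fields 3
+ # and 4 of the 'Status' line, e.g. 'in progress') from the CLI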
+ ret, out, _ = g.run(
+ self.mnode,
+ "gluster v status | grep -A 4 'Rebalance' | awk 'NR==3{print "
+ "$3,$4}'")
+ self.assertEqual(ret, 0, "Failed to get rebalance status via CLI")
+
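+ # The tasks section of the XML status dump should report the same
+ # status string as the CLI output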
+ vol_status = get_volume_status(self.mnode, self.volname, options="tasks")
+ rebalance_status = vol_status[self.volname]['task_status'][0]['statusStr']
+ self.assertIn(rebalance_status, out.replace("\n", ""),
+ "Rebalance status in XML dump does not match CLI output")
+
+ def tearDown(self):
+ ret = is_glusterd_running(self.servers)
+ if ret:
+ ret = start_glusterd(self.servers)
+ if not ret:
+ raise ExecutionError("Failed to start glusterd on %s"
+ % self.servers)
+ g.log.info("Glusterd started successfully on %s", self.servers)
+
+ # Checking for peer status from every node
+ for server in self.servers:
+ ret = wait_for_peers_to_connect(server, self.servers)
+ if not ret:
+ raise ExecutionError("Servers are not in peer probed state")
+
+ ret = cleanup_volume(self.mnode, self.volname_2)
+ if not ret:
+ raise ExecutionError(
+ "Unable to delete volume % s" % self.volname_2)
+ # Unmount and cleanup original volume
+ ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to umount the vol & cleanup Volume")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()