Diffstat (limited to 'tests/functional/bvt/test_cvt.py')
-rw-r--r--  tests/functional/bvt/test_cvt.py  161
1 file changed, 37 insertions(+), 124 deletions(-)
diff --git a/tests/functional/bvt/test_cvt.py b/tests/functional/bvt/test_cvt.py
index 11f933ae5..f8cb4f2ba 100644
--- a/tests/functional/bvt/test_cvt.py
+++ b/tests/functional/bvt/test_cvt.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2015-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -29,7 +29,6 @@
- n/w failure followed by heal
- replace-brick
"""
-import sys
import time
from glusto.core import Glusto as g
@@ -75,8 +74,6 @@ class GlusterBasicFeaturesSanityBaseClass(GlusterBaseClass):
cls.get_super_method(cls, 'setUpClass')()
# Upload io scripts for running IO on mounts
- g.log.info("Upload io scripts to clients %s for running IO on "
- "mounts", cls.clients)
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
ret = upload_scripts(cls.clients, cls.script_upload_path)
@@ -87,6 +84,14 @@ class GlusterBasicFeaturesSanityBaseClass(GlusterBaseClass):
cls.clients)
cls.counter = 1
+
+ # Temporary code:
+ # Additional checks to gather information from all
+ # servers for Bug 1810901 and setting log level to debug.
+ ret = set_volume_options(cls.mnode, 'all',
+ {'cluster.daemon-log-level': 'DEBUG'})
+ if not ret:
+ g.log.error('Failed to set cluster.daemon-log-level to DEBUG')
# int: Value of counter is used for dirname-start-num argument for
# file_dir_ops.py create_deep_dirs_with_files.
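
For context, the block added above relies on set_volume_options() accepting the special volume name 'all', which applies a cluster-wide option. A minimal sketch of that call pattern, assuming the usual glustolibs.gluster.volume_ops import path (the helper name is illustrative, not part of the patch):

    # Sketch only, not part of the patch; import path assumed from
    # glustolibs conventions. set_volume_options() returns True on success.
    from glustolibs.gluster.volume_ops import set_volume_options

    def enable_daemon_debug_logs(mnode):
        # 'all' targets cluster-wide options such as cluster.daemon-log-level.
        # As in the patch, callers log a failure instead of raising, so BVT
        # setup still proceeds with the default log levels.
        return set_volume_options(mnode, 'all',
                                  {'cluster.daemon-log-level': 'DEBUG'})
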
@@ -110,11 +115,21 @@ class GlusterBasicFeaturesSanityBaseClass(GlusterBaseClass):
self.get_super_method(self, 'setUp')()
# Setup Volume and Mount Volume
- g.log.info("Starting to Setup Volume and Mount Volume")
ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
if not ret:
raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
- g.log.info("Successful in Setup Volume and Mount Volume")
+
+ # Temporary code:
+ # Additional checks to gather information from all
+ # servers for Bug 1810901 and setting log level to debug.
+ for opt in ('diagnostics.brick-log-level',
+ 'diagnostics.client-log-level',
+ 'diagnostics.brick-sys-log-level',
+ 'diagnostics.client-sys-log-level'):
+ ret = set_volume_options(self.mnode, self.volname,
+ {opt: 'DEBUG'})
+ if not ret:
+ g.log.error('Failed to set volume option %s', opt)
# Start IO on mounts
g.log.info("Starting IO on all mounts...")
@@ -122,18 +137,18 @@ class GlusterBasicFeaturesSanityBaseClass(GlusterBaseClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
- cmd = ("/usr/bin/env python%d %s create_deep_dirs_with_files "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 2 "
- "--dir-length 15 "
+ "--dir-length 10 "
"--max-num-of-dirs 5 "
"--num-of-files 5 %s" % (
- sys.version_info.major, self.script_upload_path,
+ self.script_upload_path,
self.counter, mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
self.all_mounts_procs.append(proc)
- self.counter = self.counter + 10
+ self.counter += 10
self.io_validation_complete = False
# Adding a delay of 15 seconds before test method starts. This
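
The per-volume half of the same temporary change sets four diagnostics options to DEBUG one at a time and tolerates individual failures. A hypothetical helper capturing that loop (enable_debug_logging and its logger parameter are illustrative, not glustolibs APIs; set_option is assumed to follow the set_volume_options(mnode, volname, {opt: value}) signature used above):

    def enable_debug_logging(set_option, mnode, volname, logger):
        # Set each diagnostics option to DEBUG; log and continue on failure,
        # mirroring the behaviour of the loop added to setUp().
        options = ('diagnostics.brick-log-level',
                   'diagnostics.client-log-level',
                   'diagnostics.brick-sys-log-level',
                   'diagnostics.client-sys-log-level')
        for opt in options:
            if not set_option(mnode, volname, {opt: 'DEBUG'}):
                logger.error('Failed to set volume option %s', opt)
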
@@ -149,26 +164,19 @@ class GlusterBasicFeaturesSanityBaseClass(GlusterBaseClass):
# Wait for IO to complete if io validation is not executed in the
# test method
if not self.io_validation_complete:
- g.log.info("Wait for IO to complete as IO validation did not "
- "succeed in test method")
ret = wait_for_io_to_complete(self.all_mounts_procs, self.mounts)
if not ret:
raise ExecutionError("IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# List all files and dirs created
- g.log.info("List all files and directories:")
ret = list_all_files_and_dirs_mounts(self.mounts)
if not ret:
raise ExecutionError("Failed to list all files and dirs")
- g.log.info("Listing all files and directories is successful")
# Unmount Volume and Cleanup Volume
- g.log.info("Starting to Unmount Volume and Cleanup Volume")
ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
if not ret:
raise ExecutionError("Failed to Unmount Volume and Cleanup Volume")
- g.log.info("Successful in Unmount Volume and Cleanup Volume")
# Calling GlusterBaseClass tearDown
self.get_super_method(self, 'tearDown')()
@@ -191,7 +199,6 @@ class TestGlusterExpandVolumeSanity(GlusterBasicFeaturesSanityBaseClass):
- validate IO
"""
# Log Volume Info and Status before expanding the volume.
- g.log.info("Logging volume info and Status before expanding volume")
ret = log_volume_info_and_status(self.mnode, self.volname)
self.assertTrue(ret, ("Logging volume info and status failed on "
"volume %s", self.volname))
@@ -199,24 +206,17 @@ class TestGlusterExpandVolumeSanity(GlusterBasicFeaturesSanityBaseClass):
self.volname)
# Expanding volume by adding bricks to the volume when IO in progress
- g.log.info("Start adding bricks to volume when IO in progress")
ret = expand_volume(self.mnode, self.volname, self.servers,
self.all_servers_info)
self.assertTrue(ret, ("Failed to expand the volume when IO in "
"progress on volume %s", self.volname))
- g.log.info("Expanding volume when IO in progress is successful on "
- "volume %s", self.volname)
# Wait for volume processes to be online
- g.log.info("Wait for volume processes to be online")
ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
self.assertTrue(ret, ("Failed to wait for volume %s processes to "
"be online", self.volname))
- g.log.info("Successful in waiting for volume %s processes to be "
- "online", self.volname)
# Log Volume Info and Status after expanding the volume
- g.log.info("Logging volume info and Status after expanding volume")
ret = log_volume_info_and_status(self.mnode, self.volname)
self.assertTrue(ret, ("Logging volume info and status failed on "
"volume %s", self.volname))
@@ -224,14 +224,11 @@ class TestGlusterExpandVolumeSanity(GlusterBasicFeaturesSanityBaseClass):
self.volname)
# Verify volume's all process are online
- g.log.info("Verifying volume's all process are online")
ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
self.assertTrue(ret, ("Volume %s : All process are not online",
self.volname))
- g.log.info("Volume %s : All process are online", self.volname)
# Start Rebalance
- g.log.info("Starting Rebalance on the volume")
ret, _, _ = rebalance_start(self.mnode, self.volname)
self.assertEqual(ret, 0, ("Failed to start rebalance on the volume "
"%s", self.volname))
@@ -243,15 +240,14 @@ class TestGlusterExpandVolumeSanity(GlusterBasicFeaturesSanityBaseClass):
_, _, _ = rebalance_status(self.mnode, self.volname)
# Wait for rebalance to complete
- g.log.info("Waiting for rebalance to complete")
- ret = wait_for_rebalance_to_complete(self.mnode, self.volname)
+ ret = wait_for_rebalance_to_complete(self.mnode, self.volname,
+ timeout=1800)
self.assertTrue(ret, ("Rebalance is not yet complete on the volume "
"%s", self.volname))
g.log.info("Rebalance is successfully complete on the volume %s",
self.volname)
# Check Rebalance status after rebalance is complete
- g.log.info("Checking Rebalance status")
ret, _, _ = rebalance_status(self.mnode, self.volname)
self.assertEqual(ret, 0, ("Failed to get rebalance status for the "
"volume %s", self.volname))
@@ -264,10 +260,8 @@ class TestGlusterExpandVolumeSanity(GlusterBasicFeaturesSanityBaseClass):
self.assertTrue(ret, "IO failed on some of the clients")
# List all files and dirs created
- g.log.info("List all files and directories:")
ret = list_all_files_and_dirs_mounts(self.mounts)
self.assertTrue(ret, "Failed to list all files and dirs")
- g.log.info("Listing all files and directories is successful")
@runs_on([['distributed', 'distributed-replicated', 'distributed-dispersed'],
@@ -284,7 +278,6 @@ class TestGlusterShrinkVolumeSanity(GlusterBasicFeaturesSanityBaseClass):
- validate IO
"""
# Log Volume Info and Status before shrinking the volume.
- g.log.info("Logging volume info and Status before shrinking volume")
ret = log_volume_info_and_status(self.mnode, self.volname)
self.assertTrue(ret, ("Logging volume info and status failed on "
"volume %s", self.volname))
@@ -292,23 +285,19 @@ class TestGlusterShrinkVolumeSanity(GlusterBasicFeaturesSanityBaseClass):
self.volname)
# Shrinking volume by removing bricks from volume when IO in progress
- g.log.info("Start removing bricks from volume when IO in progress")
ret = shrink_volume(self.mnode, self.volname)
+
self.assertTrue(ret, ("Failed to shrink the volume when IO in "
"progress on volume %s", self.volname))
g.log.info("Shrinking volume when IO in progress is successful on "
"volume %s", self.volname)
# Wait for volume processes to be online
- g.log.info("Wait for volume processes to be online")
ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
self.assertTrue(ret, ("Failed to wait for volume %s processes to "
"be online", self.volname))
- g.log.info("Successful in waiting for volume %s processes to be "
- "online", self.volname)
# Log Volume Info and Status after shrinking the volume
- g.log.info("Logging volume info and Status after shrinking volume")
ret = log_volume_info_and_status(self.mnode, self.volname)
self.assertTrue(ret, ("Logging volume info and status failed on "
"volume %s", self.volname))
@@ -316,13 +305,9 @@ class TestGlusterShrinkVolumeSanity(GlusterBasicFeaturesSanityBaseClass):
self.volname)
# Verify volume's all process are online
- g.log.info("Verifying volume's all process are online after "
- "shrinking volume")
ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
self.assertTrue(ret, ("Volume %s : All process are not online",
self.volname))
- g.log.info("Volume %s : All process are online after shrinking volume",
- self.volname)
# Validate IO
ret = validate_io_procs(self.all_mounts_procs, self.mounts)
@@ -330,10 +315,8 @@ class TestGlusterShrinkVolumeSanity(GlusterBasicFeaturesSanityBaseClass):
self.assertTrue(ret, "IO failed on some of the clients")
# List all files and dirs created
- g.log.info("List all files and directories:")
ret = list_all_files_and_dirs_mounts(self.mounts)
self.assertTrue(ret, "Failed to list all files and dirs")
- g.log.info("Listing all files and directories is successful")
@runs_on([['replicated', 'distributed', 'distributed-replicated',
@@ -356,14 +339,11 @@ class TestGlusterVolumeSetSanity(GlusterBasicFeaturesSanityBaseClass):
volume_options_list = ["features.uss", "features.shard"]
# enable and validate the volume options
- g.log.info("Setting the volume options: %s", volume_options_list)
ret = enable_and_validate_volume_options(self.mnode, self.volname,
volume_options_list,
time_delay=30)
self.assertTrue(ret, ("Unable to enable the volume options: %s",
volume_options_list))
- g.log.info("Successfully enabled all the volume options: %s",
- volume_options_list)
# Validate IO
ret = validate_io_procs(self.all_mounts_procs, self.mounts)
@@ -371,10 +351,8 @@ class TestGlusterVolumeSetSanity(GlusterBasicFeaturesSanityBaseClass):
self.assertTrue(ret, "IO failed on some of the clients")
# List all files and dirs created
- g.log.info("List all files and directories:")
ret = list_all_files_and_dirs_mounts(self.mounts)
self.assertTrue(ret, "Failed to list all files and dirs")
- g.log.info("Listing all files and directories is successful")
@runs_on([['replicated', 'distributed', 'distributed-replicated',
@@ -389,14 +367,12 @@ class TestQuotaSanity(GlusterBasicFeaturesSanityBaseClass):
in progress.
"""
# Enable Quota
- g.log.info("Enabling quota on the volume %s", self.volname)
ret, _, _ = quota_enable(self.mnode, self.volname)
self.assertEqual(ret, 0, ("Failed to enable quota on the volume %s",
self.volname))
g.log.info("Successfully enabled quota on the volume %s", self.volname)
# Check if quota is enabled
- g.log.info("Validate Quota is enabled on the volume %s", self.volname)
ret = is_quota_enabled(self.mnode, self.volname)
self.assertTrue(ret, ("Quota is not enabled on the volume %s",
self.volname))
@@ -407,8 +383,6 @@ class TestQuotaSanity(GlusterBasicFeaturesSanityBaseClass):
path = "/"
# Set Quota limit on the root of the volume
- g.log.info("Set Quota Limit on the path %s of the volume %s",
- path, self.volname)
ret, _, _ = quota_limit_usage(self.mnode, self.volname,
path=path, limit="1GB")
self.assertEqual(ret, 0, ("Failed to set quota limit on path %s of "
@@ -417,8 +391,6 @@ class TestQuotaSanity(GlusterBasicFeaturesSanityBaseClass):
path, self.volname)
# quota_fetch_list
- g.log.info("Get Quota list for path %s of the volume %s",
- path, self.volname)
quota_list = quota_fetch_list(self.mnode, self.volname, path=path)
self.assertIsNotNone(quota_list, ("Failed to get the quota list for "
"path %s of the volume %s",
@@ -431,7 +403,6 @@ class TestQuotaSanity(GlusterBasicFeaturesSanityBaseClass):
"volume %s", path, quota_list, self.volname)
# Disable quota
- g.log.info("Disable quota on the volume %s", self.volname)
ret, _, _ = quota_disable(self.mnode, self.volname)
self.assertEqual(ret, 0, ("Failed to disable quota on the volume %s",
self.volname))
@@ -439,7 +410,6 @@ class TestQuotaSanity(GlusterBasicFeaturesSanityBaseClass):
self.volname)
# Check if quota is still enabled (expected : Disabled)
- g.log.info("Validate Quota is enabled on the volume %s", self.volname)
ret = is_quota_enabled(self.mnode, self.volname)
self.assertFalse(ret, ("Quota is still enabled on the volume %s "
"(expected: Disable) ", self.volname))
@@ -447,14 +417,12 @@ class TestQuotaSanity(GlusterBasicFeaturesSanityBaseClass):
self.volname)
# Enable Quota
- g.log.info("Enabling quota on the volume %s", self.volname)
ret, _, _ = quota_enable(self.mnode, self.volname)
self.assertEqual(ret, 0, ("Failed to enable quota on the volume %s",
self.volname))
g.log.info("Successfully enabled quota on the volume %s", self.volname)
# Check if quota is enabled
- g.log.info("Validate Quota is enabled on the volume %s", self.volname)
ret = is_quota_enabled(self.mnode, self.volname)
self.assertTrue(ret, ("Quota is not enabled on the volume %s",
self.volname))
@@ -462,8 +430,6 @@ class TestQuotaSanity(GlusterBasicFeaturesSanityBaseClass):
self.volname)
# quota_fetch_list
- g.log.info("Get Quota list for path %s of the volume %s",
- path, self.volname)
quota_list = quota_fetch_list(self.mnode, self.volname, path=path)
self.assertIsNotNone(quota_list, ("Failed to get the quota list for "
"path %s of the volume %s",
@@ -481,10 +447,8 @@ class TestQuotaSanity(GlusterBasicFeaturesSanityBaseClass):
self.assertTrue(ret, "IO failed on some of the clients")
# List all files and dirs created
- g.log.info("List all files and directories:")
ret = list_all_files_and_dirs_mounts(self.mounts)
self.assertTrue(ret, "Failed to list all files and dirs")
- g.log.info("Listing all files and directories is successful")
@runs_on([['replicated', 'distributed', 'distributed-replicated',
@@ -501,8 +465,6 @@ class TestSnapshotSanity(GlusterBasicFeaturesSanityBaseClass):
"""
snap_name = "snap_cvt"
# Create Snapshot
- g.log.info("Creating snapshot %s of the volume %s",
- snap_name, self.volname)
ret, _, _ = snap_create(self.mnode, self.volname, snap_name)
self.assertEqual(ret, 0, ("Failed to create snapshot with name %s "
" of the volume %s", snap_name,
@@ -511,8 +473,6 @@ class TestSnapshotSanity(GlusterBasicFeaturesSanityBaseClass):
snap_name, self.volname)
# List Snapshot
- g.log.info("Listing the snapshot created for the volume %s",
- self.volname)
snap_list = get_snap_list(self.mnode)
self.assertIsNotNone(snap_list, "Unable to get the Snapshot list")
self.assertIn(snap_name, snap_list,
@@ -521,8 +481,6 @@ class TestSnapshotSanity(GlusterBasicFeaturesSanityBaseClass):
snap_name)
# Activate the snapshot
- g.log.info("Activating snapshot %s of the volume %s",
- snap_name, self.volname)
ret, _, _ = snap_activate(self.mnode, snap_name)
self.assertEqual(ret, 0, ("Failed to activate snapshot with name %s "
" of the volume %s", snap_name,
@@ -534,8 +492,6 @@ class TestSnapshotSanity(GlusterBasicFeaturesSanityBaseClass):
uss_options = ["features.uss"]
if self.mount_type == "cifs":
uss_options.append("features.show-snapshot-directory")
- g.log.info("Enable uss options %s on the volume %s", uss_options,
- self.volname)
ret = enable_and_validate_volume_options(self.mnode, self.volname,
uss_options,
time_delay=30)
@@ -545,14 +501,11 @@ class TestSnapshotSanity(GlusterBasicFeaturesSanityBaseClass):
uss_options, self.volname)
# Viewing snapshot from mount
- g.log.info("Viewing Snapshot %s from mounts:", snap_name)
ret = view_snaps_from_mount(self.mounts, snap_name)
self.assertTrue(ret, ("Failed to View snap %s from mounts", snap_name))
g.log.info("Successfully viewed snap %s from mounts", snap_name)
# De-Activate the snapshot
- g.log.info("Deactivating snapshot %s of the volume %s",
- snap_name, self.volname)
ret, _, _ = snap_deactivate(self.mnode, snap_name)
self.assertEqual(ret, 0, ("Failed to deactivate snapshot with name %s "
" of the volume %s", snap_name,
@@ -562,8 +515,6 @@ class TestSnapshotSanity(GlusterBasicFeaturesSanityBaseClass):
# Viewing snapshot from mount (.snaps shouldn't be listed from mount)
for mount_obj in self.mounts:
- g.log.info("Viewing Snapshot %s from mount %s:%s", snap_name,
- mount_obj.client_system, mount_obj.mountpoint)
ret = view_snaps_from_mount(mount_obj, snap_name)
self.assertFalse(ret, ("Still able to View snap %s from mount "
"%s:%s", snap_name,
@@ -572,8 +523,6 @@ class TestSnapshotSanity(GlusterBasicFeaturesSanityBaseClass):
g.log.info("%s not listed under .snaps from mount %s:%s",
snap_name, mount_obj.client_system,
mount_obj.mountpoint)
- g.log.info("%s not listed under .snaps from mounts after "
- "deactivating ", snap_name)
# Validate IO
ret = validate_io_procs(self.all_mounts_procs, self.mounts)
@@ -581,10 +530,8 @@ class TestSnapshotSanity(GlusterBasicFeaturesSanityBaseClass):
self.assertTrue(ret, "IO failed on some of the clients")
# List all files and dirs created
- g.log.info("List all files and directories:")
ret = list_all_files_and_dirs_mounts(self.mounts)
self.assertTrue(ret, "Failed to list all files and dirs")
- g.log.info("Listing all files and directories is successful")
@runs_on([['replicated', 'distributed-replicated'],
@@ -602,8 +549,6 @@ class TestGlusterReplaceBrickSanity(GlusterBasicFeaturesSanityBaseClass):
- validate IO
"""
# Log Volume Info and Status before replacing brick from the volume.
- g.log.info("Logging volume info and Status before replacing brick "
- "from the volume %s", self.volname)
ret = log_volume_info_and_status(self.mnode, self.volname)
self.assertTrue(ret, ("Logging volume info and status failed on "
"volume %s", self.volname))
@@ -611,23 +556,17 @@ class TestGlusterReplaceBrickSanity(GlusterBasicFeaturesSanityBaseClass):
self.volname)
# Replace brick from a sub-volume
- g.log.info("Replace a faulty brick from the volume")
ret = replace_brick_from_volume(self.mnode, self.volname,
self.servers, self.all_servers_info)
self.assertTrue(ret, "Failed to replace faulty brick from the volume")
g.log.info("Successfully replaced faulty brick from the volume")
# Wait for volume processes to be online
- g.log.info("Wait for volume processes to be online")
ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
self.assertTrue(ret, ("Failed to wait for volume %s processes to "
"be online", self.volname))
- g.log.info("Successful in waiting for volume %s processes to be "
- "online", self.volname)
# Log Volume Info and Status after replacing the brick
- g.log.info("Logging volume info and Status after replacing brick "
- "from the volume %s", self.volname)
ret = log_volume_info_and_status(self.mnode, self.volname)
self.assertTrue(ret, ("Logging volume info and status failed on "
"volume %s", self.volname))
@@ -635,19 +574,16 @@ class TestGlusterReplaceBrickSanity(GlusterBasicFeaturesSanityBaseClass):
self.volname)
# Verify volume's all process are online
- g.log.info("Verifying volume's all process are online")
ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
self.assertTrue(ret, ("Volume %s : All process are not online",
self.volname))
- g.log.info("Volume %s : All process are online", self.volname)
# Wait for self-heal to complete
- g.log.info("Wait for self-heal to complete")
- ret = monitor_heal_completion(self.mnode, self.volname)
+ ret = monitor_heal_completion(self.mnode, self.volname,
+ timeout_period=1800)
self.assertTrue(ret, "Self heal didn't complete even after waiting "
- "for 20 minutes. 20 minutes is too much a time for "
+ "for 30 minutes. 30 minutes is too much a time for "
"current test workload")
- g.log.info("self-heal is successful after replace-brick operation")
# Validate IO
ret = validate_io_procs(self.all_mounts_procs, self.mounts)
@@ -655,10 +591,8 @@ class TestGlusterReplaceBrickSanity(GlusterBasicFeaturesSanityBaseClass):
self.assertTrue(ret, "IO failed on some of the clients")
# List all files and dirs created
- g.log.info("List all files and directories:")
ret = list_all_files_and_dirs_mounts(self.mounts)
self.assertTrue(ret, "Failed to list all files and dirs")
- g.log.info("Listing all files and directories is successful")
# This test is disabled on nfs because of bug 1473668. A patch to apply the
@@ -699,8 +633,6 @@ class TestGlusterHealSanity(GlusterBasicFeaturesSanityBaseClass):
"'disperse.optimistic-change-log' to 'off'")
# Log Volume Info and Status before simulating brick failure
- g.log.info("Logging volume info and Status before bringing bricks "
- "offlien from the volume %s", self.volname)
ret = log_volume_info_and_status(self.mnode, self.volname)
self.assertTrue(ret, ("Logging volume info and status failed on "
"volume %s", self.volname))
@@ -710,22 +642,14 @@ class TestGlusterHealSanity(GlusterBasicFeaturesSanityBaseClass):
# Select bricks to bring offline
bricks_to_bring_offline_dict = (select_bricks_to_bring_offline(
self.mnode, self.volname))
- bricks_to_bring_offline = list(filter(None, (
- bricks_to_bring_offline_dict['hot_tier_bricks'] +
- bricks_to_bring_offline_dict['cold_tier_bricks'] +
- bricks_to_bring_offline_dict['volume_bricks'])))
+ bricks_to_bring_offline = bricks_to_bring_offline_dict['volume_bricks']
# Bring bricks offline
- g.log.info("Bringing bricks: %s offline", bricks_to_bring_offline)
ret = bring_bricks_offline(self.volname, bricks_to_bring_offline)
self.assertTrue(ret, ("Failed to bring bricks: %s offline",
bricks_to_bring_offline))
- g.log.info("Successful in bringing bricks: %s offline",
- bricks_to_bring_offline)
# Log Volume Info and Status
- g.log.info("Logging volume info and Status after bringing bricks "
- "offline from the volume %s", self.volname)
ret = log_volume_info_and_status(self.mnode, self.volname)
self.assertTrue(ret, ("Logging volume info and status failed on "
"volume %s", self.volname))
@@ -733,20 +657,15 @@ class TestGlusterHealSanity(GlusterBasicFeaturesSanityBaseClass):
self.volname)
# Validate if bricks are offline
- g.log.info("Validating if bricks: %s are offline",
- bricks_to_bring_offline)
ret = are_bricks_offline(self.mnode, self.volname,
bricks_to_bring_offline)
self.assertTrue(ret, ("Not all the bricks in list: %s are offline",
bricks_to_bring_offline))
- g.log.info("Successfully validated that bricks: %s are all offline",
- bricks_to_bring_offline)
# Add delay before bringing bricks online
time.sleep(40)
# Bring bricks online
- g.log.info("Bring bricks: %s online", bricks_to_bring_offline)
ret = bring_bricks_online(self.mnode, self.volname,
bricks_to_bring_offline)
self.assertTrue(ret, ("Failed to bring bricks: %s online",
@@ -755,16 +674,12 @@ class TestGlusterHealSanity(GlusterBasicFeaturesSanityBaseClass):
bricks_to_bring_offline)
# Wait for volume processes to be online
- g.log.info("Wait for volume processes to be online")
- ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
+ ret = wait_for_volume_process_to_be_online(self.mnode, self.volname,
+ timeout=400)
self.assertTrue(ret, ("Failed to wait for volume %s processes to "
"be online", self.volname))
- g.log.info("Successful in waiting for volume %s processes to be "
- "online", self.volname)
# Log Volume Info and Status
- g.log.info("Logging volume info and Status after bringing bricks "
- "online from the volume %s", self.volname)
ret = log_volume_info_and_status(self.mnode, self.volname)
self.assertTrue(ret, ("Logging volume info and status failed on "
"volume %s", self.volname))
@@ -772,7 +687,6 @@ class TestGlusterHealSanity(GlusterBasicFeaturesSanityBaseClass):
self.volname)
# Verify volume's all process are online
- g.log.info("Verifying volume's all process are online")
ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
self.assertTrue(ret, ("Volume %s : All process are not online",
self.volname))
@@ -780,9 +694,10 @@ class TestGlusterHealSanity(GlusterBasicFeaturesSanityBaseClass):
# Wait for self-heal to complete
g.log.info("Wait for self-heal to complete")
- ret = monitor_heal_completion(self.mnode, self.volname)
+ ret = monitor_heal_completion(self.mnode, self.volname,
+ timeout_period=1800)
self.assertTrue(ret, "Self heal didn't complete even after waiting "
- "for 20 minutes. 20 minutes is too much a time for "
+ "for 30 minutes. 30 minutes is too much a time for "
"current test workload")
g.log.info("self-heal is successful after replace-brick operation")
@@ -792,7 +707,5 @@ class TestGlusterHealSanity(GlusterBasicFeaturesSanityBaseClass):
self.assertTrue(ret, "IO failed on some of the clients")
# List all files and dirs created
- g.log.info("List all files and directories:")
ret = list_all_files_and_dirs_mounts(self.mounts)
self.assertTrue(ret, "Failed to list all files and dirs")
- g.log.info("Listing all files and directories is successful")