author     Yaniv Kaul <ykaul@redhat.com>    2018-07-03 00:45:30 +0300
committer  Nigel Babu <nigelb@redhat.com>   2018-07-17 04:14:01 +0000
commit     02dbb7a68f828863e5b71dc15488e665d484ab6e (patch)
tree       4ae10586c3f26f9e73a6d533bbd4af88094c6ef5 /tests
parent     87f9679588c54c550447acdc8f0cc15626c7d881 (diff)
Shorten all the logs around verify_io_procs
No functional change, just make the tests a bit more readable. It could
be moved to a decorator later on, wrapping tests.

Change-Id: I484bb8b46907ee8f33dfcf4c960737a21819cd6a
Signed-off-by: Yaniv Kaul <ykaul@redhat.com>
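The commit message suggests moving this check into a decorator later on. A minimal sketch of what that could look like (not part of this change; the decorator name validate_io_on_exit is hypothetical, and it assumes the test class exposes all_mounts_procs, mounts and io_validation_complete the way the tests below do):

    # Hypothetical follow-up, not included in this patch: wrap a test method
    # and run the same IO validation the patch now inlines at every call site.
    import functools

    from glustolibs.io.utils import validate_io_procs

    def validate_io_on_exit(test_method):
        @functools.wraps(test_method)
        def wrapper(self, *args, **kwargs):
            result = test_method(self, *args, **kwargs)
            # Same assertion the tests below perform after starting IO.
            self.assertTrue(
                validate_io_procs(self.all_mounts_procs, self.mounts),
                "IO failed on some of the clients")
            self.io_validation_complete = True
            return result
        return wrapper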
Diffstat (limited to 'tests')
-rw-r--r--  tests/functional/afr/heal/test_heal_info_while_accessing_file.py  16
-rwxr-xr-x  tests/functional/afr/heal/test_self_heal.py  128
-rw-r--r--  tests/functional/afr/heal/test_self_heal_daemon_process.py  16
-rw-r--r--  tests/functional/afr/test_afr_cli_gfid_splitbrain.py  16
-rw-r--r--  tests/functional/afr/test_client_side_quorum.py  376
-rw-r--r--  tests/functional/afr/test_conservative_merge_of_files_heal_command.py  16
-rwxr-xr-x  tests/functional/afr/test_dist_to_repl_automatic_heal_should_be_triggered.py  16
-rwxr-xr-x  tests/functional/afr/test_heal_command_unsuccessful_as_bricks_down.py  8
-rw-r--r--  tests/functional/afr/test_heal_fail_1x3.py  16
-rw-r--r--  tests/functional/afr/test_heal_info_should_have_fixed_fields.py  8
-rwxr-xr-x  tests/functional/afr/test_manual_heal_should_trigger_heal.py  8
-rw-r--r--  tests/functional/afr/test_multiple_clients_dd_on_same_file_default.py  16
-rwxr-xr-x  tests/functional/afr/test_shd_should_not_crash_executed_heal_info.py  24
-rwxr-xr-x  tests/functional/afr/test_volume_set_options.py  16
-rwxr-xr-x  tests/functional/afr/test_write_io_mount_point_resumed_quorum_restored.py  32
-rwxr-xr-x  tests/functional/arbiter/brick_cases/test_brickcases.py  10
-rwxr-xr-x  tests/functional/arbiter/brick_cases/test_cyclic_brick_kill_list.py  8
-rwxr-xr-x  tests/functional/arbiter/brick_cases/test_rmvrf_files.py  450
-rwxr-xr-x  tests/functional/arbiter/test_mount_point_while_deleting_files.py  16
-rwxr-xr-x  tests/functional/arbiter/test_oom_on_client_heal_is_in_progress_arbiter.py  8
-rw-r--r--  tests/functional/arbiter/test_remove_faulty_subvol_and_add_new_subvol.py  8
-rwxr-xr-x  tests/functional/arbiter/test_replacing_all_arbiter_bricks_in_the_volume.py  2
-rw-r--r--  tests/functional/arbiter/test_resolving_meta_data_split_brain_extended_attributes.py  24
-rw-r--r--  tests/functional/bvt/test_cvt.py  14
-rw-r--r--  tests/functional/dht/test_rebalance_add_brick_command.py  2
-rw-r--r--  tests/functional/dht/test_rebalance_with_hidden_files.py  8
-rw-r--r--  tests/functional/glusterd/test_brick_log_messages.py  7
-rw-r--r--  tests/functional/glusterd/test_create_vol_with_used_bricks.py  7
-rw-r--r--  tests/functional/glusterd/test_readonly_option_on_volume.py  17
-rw-r--r--  tests/functional/glusterd/test_rebalance_hang.py  2
-rw-r--r--  tests/functional/glusterd/test_remove_brick_after_restart_glusterd.py  2
-rw-r--r--  tests/functional/glusterd/test_restart_glusterd_while_rebalancing.py  8
-rw-r--r--  tests/functional/glusterd/test_volume_status.py  2
-rw-r--r--  tests/functional/glusterd/test_volume_status_fd.py  7
-rw-r--r--  tests/functional/nfs_ganesha/test_nfs_ganesha_volume_exports.py  8
-rw-r--r--  tests/functional/quota/test_limit_usage_deep_dir.py  8
-rwxr-xr-x  tests/functional/quota/test_quota_limit_dir_breadth.py  8
-rw-r--r--  tests/functional/snapshot/test_mount_snap.py  23
-rw-r--r--  tests/functional/snapshot/test_restore_online_vol.py  8
-rw-r--r--  tests/functional/snapshot/test_snap_delete_existing_scheduler.py  8
-rw-r--r--  tests/functional/snapshot/test_snap_delete_original_volume.py  8
-rw-r--r--  tests/functional/snapshot/test_snap_uss.py  9
-rw-r--r--  tests/functional/snapshot/test_snap_uss_while_io.py  8
-rw-r--r--  tests/functional/snapshot/test_uss_brick_down.py  8
-rw-r--r--  tests/functional/snapshot/test_uss_snap_active_deactive.py  8
-rw-r--r--  tests/functional/snapshot/test_validate_snaps_dir_over_uss.py  14
46 files changed, 702 insertions, 730 deletions
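Nearly every hunk below applies the same mechanical change around validate_io_procs; schematically (variable names follow the tests, surrounding code elided):

    # Before: log, capture the return value, assert, log again
    g.log.info("Wait for IO to complete and validate IO ...")
    ret = validate_io_procs(self.all_mounts_procs, self.mounts)
    self.assertTrue(ret, "IO failed on some of the clients")
    g.log.info("IO is successful on all mounts")

    # After: assert the call directly and drop the extra log lines
    self.assertTrue(
        validate_io_procs(self.all_mounts_procs, self.mounts),
        "IO failed on some of the clients")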
diff --git a/tests/functional/afr/heal/test_heal_info_while_accessing_file.py b/tests/functional/afr/heal/test_heal_info_while_accessing_file.py
index 965adbdc1..2fa7b194c 100644
--- a/tests/functional/afr/heal/test_heal_info_while_accessing_file.py
+++ b/tests/functional/afr/heal/test_heal_info_while_accessing_file.py
@@ -160,11 +160,11 @@ class TestSelfHeal(GlusterBaseClass):
self.all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# Get entries before accessing file
g.log.info("Getting entries_before_accessing file...")
@@ -221,8 +221,8 @@ class TestSelfHeal(GlusterBaseClass):
'finished successfully.')
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
diff --git a/tests/functional/afr/heal/test_self_heal.py b/tests/functional/afr/heal/test_self_heal.py
index d7506a654..36bdb9948 100755
--- a/tests/functional/afr/heal/test_self_heal.py
+++ b/tests/functional/afr/heal/test_self_heal.py
@@ -183,11 +183,11 @@ class TestSelfHeal(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Get arequal before getting bricks offline
g.log.info('Getting arequal before getting bricks offline...')
@@ -255,11 +255,11 @@ class TestSelfHeal(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Bring brick online
g.log.info('Bringing bricks %s online...', bricks_to_bring_offline)
@@ -399,11 +399,11 @@ class TestSelfHeal(GlusterBaseClass):
g.log.info("All self-heal-daemons are online")
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
def test_entry_self_heal_heal_command(self):
"""
@@ -470,11 +470,11 @@ class TestSelfHeal(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Command list to do different operations with data -
# create, rename, copy and delete
@@ -550,11 +550,11 @@ class TestSelfHeal(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Get arequal before getting bricks online
g.log.info('Getting arequal before getting bricks online...')
@@ -707,11 +707,11 @@ class TestSelfHeal(GlusterBaseClass):
self.all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# Setting options
g.log.info('Setting options...')
@@ -754,11 +754,11 @@ class TestSelfHeal(GlusterBaseClass):
self.all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# Get arequal before getting bricks online
g.log.info('Getting arequal before getting bricks online...')
@@ -891,11 +891,11 @@ class TestSelfHeal(GlusterBaseClass):
self.all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# Select bricks to bring offline
bricks_to_bring_offline_dict = (select_bricks_to_bring_offline(
@@ -931,11 +931,11 @@ class TestSelfHeal(GlusterBaseClass):
self.all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# Get arequal before getting bricks online
g.log.info('Getting arequal before getting bricks online...')
@@ -1039,11 +1039,11 @@ class TestSelfHeal(GlusterBaseClass):
self.all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# Select bricks to bring offline
bricks_to_bring_offline_dict = (select_bricks_to_bring_offline(
@@ -1079,11 +1079,11 @@ class TestSelfHeal(GlusterBaseClass):
self.all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# Get arequal before getting bricks online
g.log.info('Getting arequal before getting bricks online...')
@@ -1200,11 +1200,11 @@ class TestSelfHeal(GlusterBaseClass):
self.all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# Get arequal before getting bricks offline
g.log.info('Getting arequal before getting bricks offline...')
@@ -1271,11 +1271,11 @@ class TestSelfHeal(GlusterBaseClass):
self.all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# Get arequal before getting bricks online
g.log.info('Getting arequal before getting bricks online...')
@@ -1411,11 +1411,11 @@ class TestSelfHeal(GlusterBaseClass):
self.all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# Get arequal before getting bricks offline
g.log.info('Getting arequal before getting bricks offline...')
@@ -1660,11 +1660,11 @@ class TestSelfHeal(GlusterBaseClass):
self.all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# Get arequal before getting bricks online
g.log.info('Getting arequal before getting bricks online...')
@@ -2052,11 +2052,11 @@ class TestMetadataSelfHeal(GlusterBaseClass):
self.all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# Setting options
g.log.info('Setting options...')
diff --git a/tests/functional/afr/heal/test_self_heal_daemon_process.py b/tests/functional/afr/heal/test_self_heal_daemon_process.py
index 8e207c45a..ed71e4f2b 100644
--- a/tests/functional/afr/heal/test_self_heal_daemon_process.py
+++ b/tests/functional/afr/heal/test_self_heal_daemon_process.py
@@ -651,11 +651,11 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
self.volname)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
@runs_on([['replicated', 'distributed-replicated'],
@@ -943,10 +943,10 @@ class SelfHealDaemonProcessTestsWithHealing(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Validating IO on mounts.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# check the heal info
g.log.info("Get the pending heal info for the volume %s",
diff --git a/tests/functional/afr/test_afr_cli_gfid_splitbrain.py b/tests/functional/afr/test_afr_cli_gfid_splitbrain.py
index a886209f5..483e50fb8 100644
--- a/tests/functional/afr/test_afr_cli_gfid_splitbrain.py
+++ b/tests/functional/afr/test_afr_cli_gfid_splitbrain.py
@@ -119,10 +119,10 @@ class TestSelfHeal(GlusterBaseClass):
user=mount_obj.user)
all_mounts_procs.append(proc)
# Validate I/O
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
g.log.info("Successfully created a file from mount point")
g.log.info("bringing brick 1 back online")
@@ -147,10 +147,10 @@ class TestSelfHeal(GlusterBaseClass):
user=mount_obj.user)
all_mounts_procs.append(proc)
# Validate I/O
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
g.log.info("Successfully created a new file of same name "
"from mount point")
diff --git a/tests/functional/afr/test_client_side_quorum.py b/tests/functional/afr/test_client_side_quorum.py
index 6291455de..a3b74a1ea 100644
--- a/tests/functional/afr/test_client_side_quorum.py
+++ b/tests/functional/afr/test_client_side_quorum.py
@@ -129,10 +129,10 @@ class ClientSideQuorumTests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Validating IO on mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# get the subvolumes
g.log.info("Starting to get sub-volumes for volume %s", self.volname)
@@ -268,10 +268,10 @@ class ClientSideQuorumTests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating IO on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# stat on file
g.log.info("stat on file1.txt on all mounts")
@@ -426,10 +426,10 @@ class ClientSideQuorumTests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# get the subvolumes
g.log.info("starting to get subvolumes for volume %s", self.volname)
@@ -465,10 +465,10 @@ class ClientSideQuorumTests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# set cluster.quorum-type to auto
options = {"cluster.quorum-type": "auto"}
@@ -622,10 +622,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Validating IO on mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# get the subvolumes
g.log.info("Starting to get sub-volumes for volume %s", self.volname)
@@ -658,10 +658,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Validating IO on mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# create directory user1
g.log.info("Start creating directory on all mounts...")
@@ -674,10 +674,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Validating IO on mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# create h/w link to file
g.log.info("Start creating hard link for file0.txt on all mounts")
@@ -746,10 +746,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating IO on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# stat on file
g.log.info("stat on file1.txt on all mounts")
@@ -843,10 +843,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# read the file
g.log.info("Start reading files on all mounts")
@@ -859,10 +859,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# get the subvolumes
g.log.info("Starting to get sub-volumes for volume %s", self.volname)
@@ -900,10 +900,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# read the file
g.log.info("Start reading files on all mounts")
@@ -916,10 +916,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# set the cluster.quorum-count to 1
options = {"cluster.quorum-count": "1"}
@@ -943,10 +943,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# read the file
g.log.info("Start reading files on all mounts")
@@ -959,10 +959,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# set the cluster.quorum-count to 2
options = {"cluster.quorum-count": "2"}
@@ -1002,10 +1002,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# bring back the brick1 online for all subvolumes
g.log.info("bringing up the bricks : %s online",
@@ -1030,10 +1030,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# read the file
g.log.info("Start reading files on all mounts")
@@ -1046,10 +1046,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# Bring down brick2 for all the subvolumes
g.log.info("Going to bring down the brick process "
@@ -1089,10 +1089,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# set the cluster.quorum-count to 1
options = {"cluster.quorum-count": "1"}
@@ -1116,10 +1116,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# read the file
g.log.info("Start reading files on all mounts")
@@ -1132,10 +1132,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# set cluster.quorum-type to auto and cluster.quorum-count back to 2
options = {"cluster.quorum-type": "auto",
@@ -1160,10 +1160,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# read the file
g.log.info("Start reading files on all mounts")
@@ -1176,10 +1176,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# Bring back brick2 online for all the subvolumes
g.log.info("bringing up the bricks : %s online",
@@ -1229,10 +1229,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# set the quorum-type to none
options = {"cluster.quorum-type": "none"}
@@ -1256,10 +1256,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# read the file
g.log.info("Start reading files on all mounts")
@@ -1272,10 +1272,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
@runs_on([['distributed-replicated'],
@@ -1485,11 +1485,11 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
volumes_to_change_options = ['1', '3']
# set cluster.quorum-type to auto
@@ -1622,10 +1622,10 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Validating IO on mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
@runs_on([['replicated', 'distributed-replicated'], ['glusterfs']])
@@ -1757,10 +1757,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# read the file
g.log.info("Start reading files on all mounts")
@@ -1773,10 +1773,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# get the subvolumes
g.log.info("Starting to get sub-volumes for volume %s", self.volname)
@@ -1813,10 +1813,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# read the file
g.log.info("Start reading files on all mounts")
@@ -1829,10 +1829,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# bring down brick2 for all the subvolumes
offline_brick2_from_replicasets = []
@@ -1863,10 +1863,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# read the file
g.log.info("Start reading files on all mounts")
@@ -1879,10 +1879,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# set the cluster.quorum-count to 1
options = {"cluster.quorum-count": "1"}
@@ -1906,10 +1906,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# read the file
g.log.info("Start reading files on all mounts")
@@ -1922,10 +1922,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# set the cluster.quorum-count to 2
options = {"cluster.quorum-count": "2"}
@@ -1965,10 +1965,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# bring back the brick1 online for all subvolumes
g.log.info("bringing up the brick : %s online",
@@ -1993,10 +1993,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# read the file
g.log.info("Start reading files on all mounts")
@@ -2009,10 +2009,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# Bring back brick2 online
g.log.info("bringing up the brick : %s online",
@@ -2037,10 +2037,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# read the file
g.log.info("Start reading files on all mounts")
@@ -2053,10 +2053,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# set cluster.quorum-type to auto
options = {"cluster.quorum-type": "auto"}
@@ -2080,10 +2080,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# read the file
g.log.info("Start reading files on all mounts")
@@ -2096,10 +2096,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# bring down brick1 and brick2 for all the subvolumes
for i in range(0, num_subvols):
@@ -2144,10 +2144,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# set the cluster.quorum-count to 1
options = {"cluster.quorum-count": "1"}
@@ -2187,10 +2187,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# set the cluster.quorum-count to 3
options = {"cluster.quorum-count": "3"}
@@ -2230,10 +2230,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# set the quorum-type to none
options = {"cluster.quorum-type": "none"}
@@ -2257,10 +2257,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# read the file
g.log.info("Start reading files on all mounts")
@@ -2273,7 +2273,7 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
diff --git a/tests/functional/afr/test_conservative_merge_of_files_heal_command.py b/tests/functional/afr/test_conservative_merge_of_files_heal_command.py
index 2f36635c5..3a6c62069 100644
--- a/tests/functional/afr/test_conservative_merge_of_files_heal_command.py
+++ b/tests/functional/afr/test_conservative_merge_of_files_heal_command.py
@@ -170,11 +170,11 @@ class VerifySelfHealTriggersHealCommand(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Bring brick 0 online
g.log.info('Bringing bricks %s online...', bricks_list[0])
@@ -214,11 +214,11 @@ class VerifySelfHealTriggersHealCommand(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Bring brick 1 online
g.log.info('Bringing bricks %s online...', bricks_list[1])
diff --git a/tests/functional/afr/test_dist_to_repl_automatic_heal_should_be_triggered.py b/tests/functional/afr/test_dist_to_repl_automatic_heal_should_be_triggered.py
index 9aee8aab5..1e9042db2 100755
--- a/tests/functional/afr/test_dist_to_repl_automatic_heal_should_be_triggered.py
+++ b/tests/functional/afr/test_dist_to_repl_automatic_heal_should_be_triggered.py
@@ -131,11 +131,11 @@ class TestSelfHeal(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Get arequal for mount before adding bricks
g.log.info('Getting arequal before adding bricks...')
@@ -240,11 +240,11 @@ class TestSelfHeal(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Bring brick 0 online
g.log.info('Bringing bricks %s online...', bricks_list[0])
diff --git a/tests/functional/afr/test_heal_command_unsuccessful_as_bricks_down.py b/tests/functional/afr/test_heal_command_unsuccessful_as_bricks_down.py
index ec76fd2f7..2e4ddb9a1 100755
--- a/tests/functional/afr/test_heal_command_unsuccessful_as_bricks_down.py
+++ b/tests/functional/afr/test_heal_command_unsuccessful_as_bricks_down.py
@@ -213,8 +213,8 @@ class TestSelfHeal(GlusterBaseClass):
g.log.info('Volume is not in split-brain state')
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
diff --git a/tests/functional/afr/test_heal_fail_1x3.py b/tests/functional/afr/test_heal_fail_1x3.py
index 458203444..596b145a5 100644
--- a/tests/functional/afr/test_heal_fail_1x3.py
+++ b/tests/functional/afr/test_heal_fail_1x3.py
@@ -108,10 +108,10 @@ class TestSelfHeal(GlusterBaseClass):
user=mount_obj.user)
all_mounts_procs.append(proc)
# Validate I/O
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
g.log.info("Successfully created a file from mount point")
# getting list of all bricks
@@ -140,10 +140,10 @@ class TestSelfHeal(GlusterBaseClass):
user=mount_obj.user)
all_mounts_procs.append(proc)
# Validate I/O
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
g.log.info("Successfully created a new file of same name "
"from mount point")
diff --git a/tests/functional/afr/test_heal_info_should_have_fixed_fields.py b/tests/functional/afr/test_heal_info_should_have_fixed_fields.py
index 8503888f5..ec9c1d95f 100644
--- a/tests/functional/afr/test_heal_info_should_have_fixed_fields.py
+++ b/tests/functional/afr/test_heal_info_should_have_fixed_fields.py
@@ -142,11 +142,11 @@ class VerifySelfHealTriggersHealCommand(GlusterBaseClass):
bricks_to_bring_offline)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Bring brick online
g.log.info('Bringing bricks %s online...', bricks_to_bring_offline)
diff --git a/tests/functional/afr/test_manual_heal_should_trigger_heal.py b/tests/functional/afr/test_manual_heal_should_trigger_heal.py
index 5ea312326..04771cc88 100755
--- a/tests/functional/afr/test_manual_heal_should_trigger_heal.py
+++ b/tests/functional/afr/test_manual_heal_should_trigger_heal.py
@@ -126,11 +126,11 @@ class TestSelfHeal(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Get arequal for mount before adding bricks
g.log.info('Getting arequal before adding bricks...')
diff --git a/tests/functional/afr/test_multiple_clients_dd_on_same_file_default.py b/tests/functional/afr/test_multiple_clients_dd_on_same_file_default.py
index 60fcb5952..0d7d82d18 100644
--- a/tests/functional/afr/test_multiple_clients_dd_on_same_file_default.py
+++ b/tests/functional/afr/test_multiple_clients_dd_on_same_file_default.py
@@ -193,17 +193,17 @@ class VerifySelfHealTriggersHealCommand(GlusterBaseClass):
bricks_list[1])
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# Validate reading
- g.log.info("Wait for reading to complete ...")
- ret = validate_io_procs(all_mounts_procs_read, self.mounts)
- self.assertTrue(ret, "Reading failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs_read, self.mounts),
+ "Reading failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("Reading is successful on all mounts")
# Bring brick1 online
g.log.info('Bringing bricks %s online...', bricks_list[1])
diff --git a/tests/functional/afr/test_shd_should_not_crash_executed_heal_info.py b/tests/functional/afr/test_shd_should_not_crash_executed_heal_info.py
index deb2f39eb..189d70af9 100755
--- a/tests/functional/afr/test_shd_should_not_crash_executed_heal_info.py
+++ b/tests/functional/afr/test_shd_should_not_crash_executed_heal_info.py
@@ -140,11 +140,11 @@ class VerifySelfHealTriggersHealCommand(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Bring brick0 offline
g.log.info('Bringing bricks %s offline', bricks_list[0])
@@ -182,11 +182,11 @@ class VerifySelfHealTriggersHealCommand(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Get heal info
g.log.info("Getting heal info...")
@@ -252,11 +252,11 @@ class VerifySelfHealTriggersHealCommand(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Get heal info
g.log.info("Getting heal info...")
diff --git a/tests/functional/afr/test_volume_set_options.py b/tests/functional/afr/test_volume_set_options.py
index 1d8d6737f..96a93b135 100755
--- a/tests/functional/afr/test_volume_set_options.py
+++ b/tests/functional/afr/test_volume_set_options.py
@@ -153,11 +153,11 @@ class VolumeSetDataSelfHealTests(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Check arequals
# get the subvolumes
@@ -249,11 +249,11 @@ class VolumeSetDataSelfHealTests(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Bring brick online
g.log.info('Bringing bricks %s online...', bricks_to_bring_offline)
diff --git a/tests/functional/afr/test_write_io_mount_point_resumed_quorum_restored.py b/tests/functional/afr/test_write_io_mount_point_resumed_quorum_restored.py
index 4e6020872..96d5d7864 100755
--- a/tests/functional/afr/test_write_io_mount_point_resumed_quorum_restored.py
+++ b/tests/functional/afr/test_write_io_mount_point_resumed_quorum_restored.py
@@ -160,11 +160,11 @@ class ClientSideQuorumRestored(GlusterBaseClass):
# Validate IO
self.io_validation_complete = False
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Do IO and check on subvols with nodes to reboot
subvols_dict = get_subvols(self.mnode, self.volname)
@@ -284,11 +284,11 @@ class ClientSideQuorumRestored(GlusterBaseClass):
# Validate IO
self.io_validation_complete = False
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Do IO and check on subvols without nodes to reboot
subvols_dict = get_subvols(self.mnode, self.volname)
@@ -347,11 +347,11 @@ class ClientSideQuorumRestored(GlusterBaseClass):
# Validate IO
self.io_validation_complete = False
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# check if nodes are online
counter = 0
@@ -554,8 +554,8 @@ class ClientSideQuorumRestored(GlusterBaseClass):
for mounts_procs in all_mounts_procs:
# Validate IO
self.io_validation_complete = False
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
diff --git a/tests/functional/arbiter/brick_cases/test_brickcases.py b/tests/functional/arbiter/brick_cases/test_brickcases.py
index 5d3a03816..9cce9af03 100755
--- a/tests/functional/arbiter/brick_cases/test_brickcases.py
+++ b/tests/functional/arbiter/brick_cases/test_brickcases.py
@@ -167,11 +167,9 @@ class GlusterArbiterVolumeTypeChangeClass(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
ret = validate_io_procs(self.all_mounts_procs, self.mounts)
self.io_validation_complete = True
self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# Adding bricks to make an Arbiter Volume
g.log.info("Adding bricks to convert to Arbiter Volume")
@@ -387,8 +385,8 @@ class GlusterArbiterVolumeTypeChangeClass(GlusterBaseClass):
self.volname)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
diff --git a/tests/functional/arbiter/brick_cases/test_cyclic_brick_kill_list.py b/tests/functional/arbiter/brick_cases/test_cyclic_brick_kill_list.py
index 37709ca52..5a828efb1 100755
--- a/tests/functional/arbiter/brick_cases/test_cyclic_brick_kill_list.py
+++ b/tests/functional/arbiter/brick_cases/test_cyclic_brick_kill_list.py
@@ -176,11 +176,11 @@ class ListMount(GlusterBaseClass):
g.log.info('All self-heal Daemons are online')
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Checking volume status
g.log.info("Logging volume info and Status after bringing bricks "
diff --git a/tests/functional/arbiter/brick_cases/test_rmvrf_files.py b/tests/functional/arbiter/brick_cases/test_rmvrf_files.py
index 6fe959e64..5d8e87ed5 100755
--- a/tests/functional/arbiter/brick_cases/test_rmvrf_files.py
+++ b/tests/functional/arbiter/brick_cases/test_rmvrf_files.py
@@ -1,225 +1,225 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-from glusto.core import Glusto as g
-from glustolibs.gluster.gluster_base_class import (GlusterBaseClass, runs_on)
-from glustolibs.gluster.volume_libs import (
- log_volume_info_and_status)
-from glustolibs.gluster.brick_libs import (
- bring_bricks_offline, bring_bricks_online,
- are_bricks_offline, are_bricks_online, select_bricks_to_bring_offline)
-from glustolibs.gluster.heal_libs import (
- monitor_heal_completion)
-from glustolibs.gluster.exceptions import ExecutionError
-from glustolibs.io.utils import (validate_io_procs,
- list_all_files_and_dirs_mounts,
- wait_for_io_to_complete)
-from glustolibs.misc.misc_libs import upload_scripts
-
-
-@runs_on([['replicated', 'distributed-replicated'],
- ['glusterfs', 'cifs', 'nfs']])
-class TestRmrfMount(GlusterBaseClass):
- """
- Description:
- Removing files when one of the brick if in offline state
- """
- @classmethod
- def setUpClass(cls):
- # Calling GlusterBaseClass setUpClass
- GlusterBaseClass.setUpClass.im_func(cls)
-
- # Upload io scripts for running IO on mounts
- g.log.info("Upload io scripts to clients %s for running IO on mounts",
- cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
- cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
- ret = upload_scripts(cls.clients, [script_local_path])
- if not ret:
- raise ExecutionError("Failed to upload IO scripts to clients %s"
- % cls.clients)
- g.log.info("Successfully uploaded IO scripts to clients %s",
- cls.clients)
-
- cls.counter = 1
- # int: Value of counter is used for dirname-start-num argument for
- # file_dir_ops.py create_deep_dirs_with_files.
-
- # The --dir-length argument value for file_dir_ops.py
- # create_deep_dirs_with_files is set to 10 (refer to the cmd in setUp
- # method). This means every mount will create
- # 10 top level dirs. For every mountpoint/testcase to create new set of
- # dirs, we are incrementing the counter by --dir-length value i.e 10
- # in this test suite.
-
- # If we are changing the --dir-length to new value, ensure the counter
- # is also incremented by same value to create new set of files/dirs.
-
- def setUp(self):
- # Calling GlusterBaseClass setUp
- GlusterBaseClass.setUp.im_func(self)
-
- self.all_mounts_procs = []
- self.io_validation_complete = False
-
- # Setup Volume and Mount Volume
- g.log.info("Starting to Setup Volume and Mount Volume")
- ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
- if not ret:
- raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
- g.log.info("Successful in Setup Volume and Mount Volume")
-
- def tearDown(self):
- """
- If test method failed before validating IO, tearDown waits for the
- IO's to complete and checks for the IO exit status
-
- Unmount Volume and Cleanup Volume
- """
-
- # Wait for IO to complete if io validation is not executed in the
- # test method
- if not self.io_validation_complete:
- g.log.info("Wait for IO to complete as IO validation did not "
- "succeed in test method")
- ret = wait_for_io_to_complete(self.all_mounts_procs, self.mounts)
- if not ret:
- raise ExecutionError("IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
-
- # Unmount Volume and Cleanup Volume
- g.log.info("Starting to Unmount Volume and Cleanup Volume")
- ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
- if not ret:
- raise ExecutionError("Failed to Unmount Volume and Cleanup Volume")
- g.log.info("Successful in Unmount Volume and Cleanup Volume")
-
- # Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
-
- def test_self_heal(self):
- """
- Description:-
- - Create files on mount point
- - Kill one brick from volume
- - rm -rfv on mount point
- - bring bricks online
- - wait for heals
- - list
- """
- # pylint: disable=too-many-statements
-
- # IO on the mount point
- g.log.info("Starting IO on all mounts...")
- self.all_mounts_procs = []
- for mount_obj in self.mounts:
- g.log.info("Starting IO on %s:%s", mount_obj.client_system,
- mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
- "--dirname-start-num %d "
- "--dir-depth 2 "
- "--dir-length 35 "
- "--max-num-of-dirs 5 "
- "--num-of-files 5 %s" % (self.script_upload_path,
- self.counter,
- mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- self.all_mounts_procs.append(proc)
- self.counter = self.counter + 10
-
- # Select bricks to bring offline
- bricks_to_bring_offline_dict = (select_bricks_to_bring_offline(
- self.mnode, self.volname))
- bricks_to_bring_offline = filter(None, (
- bricks_to_bring_offline_dict['hot_tier_bricks'] +
- bricks_to_bring_offline_dict['cold_tier_bricks'] +
- bricks_to_bring_offline_dict['volume_bricks']))
-
- # Killing one brick from the volume set
- g.log.info("Bringing bricks: %s offline", bricks_to_bring_offline)
- ret = bring_bricks_offline(self.volname, bricks_to_bring_offline)
- self.assertTrue(ret, ("Failed to bring bricks: %s offline",
- bricks_to_bring_offline))
- g.log.info("Successful in bringing bricks: %s offline",
- bricks_to_bring_offline)
-
- # Validate if bricks are offline
- g.log.info("Validating if bricks: %s are offline",
- bricks_to_bring_offline)
- ret = are_bricks_offline(self.mnode, self.volname,
- bricks_to_bring_offline)
- self.assertTrue(ret, "Not all the bricks in list: %s are offline" %
- bricks_to_bring_offline)
- g.log.info("Successfully validated that bricks: %s are all offline",
- bricks_to_bring_offline)
-
- # Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
-
- # Checking volume status
- g.log.info("Logging volume info and Status after bringing bricks "
- "offline from the volume %s", self.volname)
- ret = log_volume_info_and_status(self.mnode, self.volname)
- self.assertTrue(ret, ("Logging volume info and status failed on "
- "volume %s", self.volname))
- g.log.info("Successful in logging volume info and status of volume %s",
- self.volname)
-
- # Removing files from the mount point when one brick is down
- g.log.info("Removing files from the mount point")
- mountpoint = self.mounts[0].mountpoint
- client = self.mounts[0].client_system
- cmd = "rm -rfv %s/*" % mountpoint
- ret, _, _ = g.run(client, cmd)
- if ret != 0:
- raise ExecutionError("failed to delete the files")
-
- # Bringing bricks online
- g.log.info('Bringing bricks %s online', bricks_to_bring_offline)
- ret = bring_bricks_online(self.mnode, self.volname,
- bricks_to_bring_offline)
- self.assertTrue(ret, 'Failed to bring bricks %s online' %
- bricks_to_bring_offline)
- g.log.info('Bricks %s are online', bricks_to_bring_offline)
-
- # Check if bricks are online
- g.log.info("Checking bricks are online or not")
- ret = are_bricks_online(self.mnode, self.volname,
- bricks_to_bring_offline)
- self.assertTrue(ret, 'Bricks %s are not online' %
- bricks_to_bring_offline)
- g.log.info('Bricks %s are online', bricks_to_bring_offline)
-
- # Monitoring heals on the volume
- g.log.info("Wait for heal completion...")
- ret = monitor_heal_completion(self.mnode, self.volname)
- self.assertTrue(ret, "Self heal didn't complete even after waiting "
- "for 20 minutes.")
- g.log.info("self-heal is successful after changing the volume type "
- "from replicated to arbitered volume")
-
- # List all files and dirs created
- g.log.info("List all files and directories:")
- ret = list_all_files_and_dirs_mounts(self.mounts)
- self.assertTrue(ret, "Failed to list all files and dirs")
- g.log.info("Listing all files and directories is successful")
+# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import (GlusterBaseClass, runs_on)
+from glustolibs.gluster.volume_libs import (
+ log_volume_info_and_status)
+from glustolibs.gluster.brick_libs import (
+ bring_bricks_offline, bring_bricks_online,
+ are_bricks_offline, are_bricks_online, select_bricks_to_bring_offline)
+from glustolibs.gluster.heal_libs import (
+ monitor_heal_completion)
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.io.utils import (validate_io_procs,
+ list_all_files_and_dirs_mounts,
+ wait_for_io_to_complete)
+from glustolibs.misc.misc_libs import upload_scripts
+
+
+@runs_on([['replicated', 'distributed-replicated'],
+ ['glusterfs', 'cifs', 'nfs']])
+class TestRmrfMount(GlusterBaseClass):
+ """
+ Description:
+ Removing files when one of the bricks is in offline state
+ """
+ @classmethod
+ def setUpClass(cls):
+ # Calling GlusterBaseClass setUpClass
+ GlusterBaseClass.setUpClass.im_func(cls)
+
+ # Upload io scripts for running IO on mounts
+ g.log.info("Upload io scripts to clients %s for running IO on mounts",
+ cls.clients)
+ script_local_path = ("/usr/share/glustolibs/io/scripts/"
+ "file_dir_ops.py")
+ cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
+ "file_dir_ops.py")
+ ret = upload_scripts(cls.clients, [script_local_path])
+ if not ret:
+ raise ExecutionError("Failed to upload IO scripts to clients %s"
+ % cls.clients)
+ g.log.info("Successfully uploaded IO scripts to clients %s",
+ cls.clients)
+
+ cls.counter = 1
+ # int: Value of counter is used for dirname-start-num argument for
+ # file_dir_ops.py create_deep_dirs_with_files.
+
+ # The --dir-length argument value for file_dir_ops.py
+ # create_deep_dirs_with_files is set to 10 (refer to the cmd in setUp
+ # method). This means every mount will create
+ # 10 top level dirs. For every mountpoint/testcase to create new set of
+ # dirs, we are incrementing the counter by --dir-length value i.e 10
+ # in this test suite.
+
+ # If we are changing the --dir-length to new value, ensure the counter
+ # is also incremented by same value to create new set of files/dirs.
+
+ def setUp(self):
+ # Calling GlusterBaseClass setUp
+ GlusterBaseClass.setUp.im_func(self)
+
+ self.all_mounts_procs = []
+ self.io_validation_complete = False
+
+ # Setup Volume and Mount Volume
+ g.log.info("Starting to Setup Volume and Mount Volume")
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
+ g.log.info("Successful in Setup Volume and Mount Volume")
+
+ def tearDown(self):
+ """
+ If the test method fails before validating IO, tearDown waits for
+ the IOs to complete and checks the IO exit status
+
+ Unmount Volume and Cleanup Volume
+ """
+
+ # Wait for IO to complete if io validation is not executed in the
+ # test method
+ if not self.io_validation_complete:
+ g.log.info("Wait for IO to complete as IO validation did not "
+ "succeed in test method")
+ ret = wait_for_io_to_complete(self.all_mounts_procs, self.mounts)
+ if not ret:
+ raise ExecutionError("IO failed on some of the clients")
+ g.log.info("IO is successful on all mounts")
+
+ # Unmount Volume and Cleanup Volume
+ g.log.info("Starting to Unmount Volume and Cleanup Volume")
+ ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to Unmount Volume and Cleanup Volume")
+ g.log.info("Successful in Unmount Volume and Cleanup Volume")
+
+ # Calling GlusterBaseClass tearDown
+ GlusterBaseClass.tearDown.im_func(self)
+
+ def test_self_heal(self):
+ """
+ Description:-
+ - Create files on mount point
+ - Kill one brick from volume
+ - rm -rfv on mount point
+ - bring bricks online
+ - wait for heals
+ - list
+ """
+ # pylint: disable=too-many-statements
+
+ # IO on the mount point
+ g.log.info("Starting IO on all mounts...")
+ self.all_mounts_procs = []
+ for mount_obj in self.mounts:
+ g.log.info("Starting IO on %s:%s", mount_obj.client_system,
+ mount_obj.mountpoint)
+ cmd = ("python %s create_deep_dirs_with_files "
+ "--dirname-start-num %d "
+ "--dir-depth 2 "
+ "--dir-length 35 "
+ "--max-num-of-dirs 5 "
+ "--num-of-files 5 %s" % (self.script_upload_path,
+ self.counter,
+ mount_obj.mountpoint))
+ proc = g.run_async(mount_obj.client_system, cmd,
+ user=mount_obj.user)
+ self.all_mounts_procs.append(proc)
+ self.counter = self.counter + 10
+
+ # Select bricks to bring offline
+ bricks_to_bring_offline_dict = (select_bricks_to_bring_offline(
+ self.mnode, self.volname))
+ bricks_to_bring_offline = filter(None, (
+ bricks_to_bring_offline_dict['hot_tier_bricks'] +
+ bricks_to_bring_offline_dict['cold_tier_bricks'] +
+ bricks_to_bring_offline_dict['volume_bricks']))
+
+ # Killing one brick from the volume set
+ g.log.info("Bringing bricks: %s offline", bricks_to_bring_offline)
+ ret = bring_bricks_offline(self.volname, bricks_to_bring_offline)
+ self.assertTrue(ret, ("Failed to bring bricks: %s offline",
+ bricks_to_bring_offline))
+ g.log.info("Successful in bringing bricks: %s offline",
+ bricks_to_bring_offline)
+
+ # Validate if bricks are offline
+ g.log.info("Validating if bricks: %s are offline",
+ bricks_to_bring_offline)
+ ret = are_bricks_offline(self.mnode, self.volname,
+ bricks_to_bring_offline)
+ self.assertTrue(ret, "Not all the bricks in list: %s are offline" %
+ bricks_to_bring_offline)
+ g.log.info("Successfully validated that bricks: %s are all offline",
+ bricks_to_bring_offline)
+
+ # Validate IO
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
+ self.io_validation_complete = True
+
+ # Checking volume status
+ g.log.info("Logging volume info and Status after bringing bricks "
+ "offline from the volume %s", self.volname)
+ ret = log_volume_info_and_status(self.mnode, self.volname)
+ self.assertTrue(ret, ("Logging volume info and status failed on "
+ "volume %s", self.volname))
+ g.log.info("Successful in logging volume info and status of volume %s",
+ self.volname)
+
+ # Removing files from the mount point when one brick is down
+ g.log.info("Removing files from the mount point")
+ mountpoint = self.mounts[0].mountpoint
+ client = self.mounts[0].client_system
+ cmd = "rm -rfv %s/*" % mountpoint
+ ret, _, _ = g.run(client, cmd)
+ if ret != 0:
+ raise ExecutionError("failed to delete the files")
+
+ # Bringing bricks online
+ g.log.info('Bringing bricks %s online', bricks_to_bring_offline)
+ ret = bring_bricks_online(self.mnode, self.volname,
+ bricks_to_bring_offline)
+ self.assertTrue(ret, 'Failed to bring bricks %s online' %
+ bricks_to_bring_offline)
+ g.log.info('Bricks %s are online', bricks_to_bring_offline)
+
+ # Check if bricks are online
+ g.log.info("Checking bricks are online or not")
+ ret = are_bricks_online(self.mnode, self.volname,
+ bricks_to_bring_offline)
+ self.assertTrue(ret, 'Bricks %s are not online' %
+ bricks_to_bring_offline)
+ g.log.info('Bricks %s are online', bricks_to_bring_offline)
+
+ # Monitoring heals on the volume
+ g.log.info("Wait for heal completion...")
+ ret = monitor_heal_completion(self.mnode, self.volname)
+ self.assertTrue(ret, "Self heal didn't complete even after waiting "
+ "for 20 minutes.")
+ g.log.info("self-heal is successful after changing the volume type "
+ "from replicated to arbitered volume")
+
+ # List all files and dirs created
+ g.log.info("List all files and directories:")
+ ret = list_all_files_and_dirs_mounts(self.mounts)
+ self.assertTrue(ret, "Failed to list all files and dirs")
+ g.log.info("Listing all files and directories is successful")
diff --git a/tests/functional/arbiter/test_mount_point_while_deleting_files.py b/tests/functional/arbiter/test_mount_point_while_deleting_files.py
index b8027dcab..c8e4804ee 100755
--- a/tests/functional/arbiter/test_mount_point_while_deleting_files.py
+++ b/tests/functional/arbiter/test_mount_point_while_deleting_files.py
@@ -208,10 +208,10 @@ class VolumeSetDataSelfHealTests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Validating IO on mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# select bricks to bring offline
volume_list = get_volume_list(self.mnode)
@@ -247,7 +247,7 @@ class VolumeSetDataSelfHealTests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Validating IO on mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
diff --git a/tests/functional/arbiter/test_oom_on_client_heal_is_in_progress_arbiter.py b/tests/functional/arbiter/test_oom_on_client_heal_is_in_progress_arbiter.py
index 26854729a..0e55cdf8a 100755
--- a/tests/functional/arbiter/test_oom_on_client_heal_is_in_progress_arbiter.py
+++ b/tests/functional/arbiter/test_oom_on_client_heal_is_in_progress_arbiter.py
@@ -151,11 +151,11 @@ class ArbiterSelfHealTests(GlusterBaseClass):
bricks_to_bring_offline)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Bring 1-st brick online
g.log.info('Bringing bricks %s online...', bricks_to_bring_offline)
diff --git a/tests/functional/arbiter/test_remove_faulty_subvol_and_add_new_subvol.py b/tests/functional/arbiter/test_remove_faulty_subvol_and_add_new_subvol.py
index 5211d297a..6768d7660 100644
--- a/tests/functional/arbiter/test_remove_faulty_subvol_and_add_new_subvol.py
+++ b/tests/functional/arbiter/test_remove_faulty_subvol_and_add_new_subvol.py
@@ -146,11 +146,11 @@ class TestArbiterSelfHeal(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Get areequal before removing subvol
g.log.info('Getting areequal before removing subvol...')
diff --git a/tests/functional/arbiter/test_replacing_all_arbiter_bricks_in_the_volume.py b/tests/functional/arbiter/test_replacing_all_arbiter_bricks_in_the_volume.py
index 1bb0fa317..db456757d 100755
--- a/tests/functional/arbiter/test_replacing_all_arbiter_bricks_in_the_volume.py
+++ b/tests/functional/arbiter/test_replacing_all_arbiter_bricks_in_the_volume.py
@@ -235,8 +235,6 @@ class TestArbiterSelfHeal(GlusterBaseClass):
g.log.info('Volume is not in split-brain state')
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
ret = validate_io_procs(self.all_mounts_procs, self.mounts)
self.assertTrue(ret, "IO failed on some of the clients")
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
diff --git a/tests/functional/arbiter/test_resolving_meta_data_split_brain_extended_attributes.py b/tests/functional/arbiter/test_resolving_meta_data_split_brain_extended_attributes.py
index e6f68c0cf..0e9f945c2 100644
--- a/tests/functional/arbiter/test_resolving_meta_data_split_brain_extended_attributes.py
+++ b/tests/functional/arbiter/test_resolving_meta_data_split_brain_extended_attributes.py
@@ -127,11 +127,11 @@ class TestArbiterSelfHeal(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# get bricks with file
g.log.info('Getting bricks with file...')
@@ -176,11 +176,11 @@ class TestArbiterSelfHeal(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Bring arbiter brick online
g.log.info('Bringing bricks %s online...', bricks_to_bring_offline)
@@ -222,11 +222,11 @@ class TestArbiterSelfHeal(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Bring 1-st data brick online
g.log.info('Bringing bricks %s online...', bricks_to_bring_offline)
diff --git a/tests/functional/bvt/test_cvt.py b/tests/functional/bvt/test_cvt.py
index 16ff577bb..f1250b764 100644
--- a/tests/functional/bvt/test_cvt.py
+++ b/tests/functional/bvt/test_cvt.py
@@ -260,11 +260,9 @@ class TestGlusterExpandVolumeSanity(GlusterBasicFeaturesSanityBaseClass):
self.volname)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
ret = validate_io_procs(self.all_mounts_procs, self.mounts)
self.io_validation_complete = True
self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# List all files and dirs created
g.log.info("List all files and directories:")
@@ -328,11 +326,9 @@ class TestGlusterShrinkVolumeSanity(GlusterBasicFeaturesSanityBaseClass):
self.volname)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
ret = validate_io_procs(self.all_mounts_procs, self.mounts)
self.io_validation_complete = True
self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# List all files and dirs created
g.log.info("List all files and directories:")
@@ -371,11 +367,9 @@ class TestGlusterVolumeSetSanity(GlusterBasicFeaturesSanityBaseClass):
volume_options_list)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
ret = validate_io_procs(self.all_mounts_procs, self.mounts)
self.io_validation_complete = True
self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# List all files and dirs created
g.log.info("List all files and directories:")
@@ -483,11 +477,9 @@ class TestQuotaSanity(GlusterBasicFeaturesSanityBaseClass):
"volume %s", path, quota_list, self.volname)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
ret = validate_io_procs(self.all_mounts_procs, self.mounts)
self.io_validation_complete = True
self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# List all files and dirs created
g.log.info("List all files and directories:")
@@ -585,11 +577,9 @@ class TestSnapshotSanity(GlusterBasicFeaturesSanityBaseClass):
"deactivating ", snap_name)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
ret = validate_io_procs(self.all_mounts_procs, self.mounts)
self.io_validation_complete = True
self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# List all files and dirs created
g.log.info("List all files and directories:")
@@ -661,11 +651,9 @@ class TestGlusterReplaceBrickSanity(GlusterBasicFeaturesSanityBaseClass):
g.log.info("self-heal is successful after replace-brick operation")
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
ret = validate_io_procs(self.all_mounts_procs, self.mounts)
self.io_validation_complete = True
self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# List all files and dirs created
g.log.info("List all files and directories:")
@@ -800,11 +788,9 @@ class TestGlusterHealSanity(GlusterBasicFeaturesSanityBaseClass):
g.log.info("self-heal is successful after replace-brick operation")
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
ret = validate_io_procs(self.all_mounts_procs, self.mounts)
self.io_validation_complete = True
self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# List all files and dirs created
g.log.info("List all files and directories:")
diff --git a/tests/functional/dht/test_rebalance_add_brick_command.py b/tests/functional/dht/test_rebalance_add_brick_command.py
index 7b4a39433..17c57843d 100644
--- a/tests/functional/dht/test_rebalance_add_brick_command.py
+++ b/tests/functional/dht/test_rebalance_add_brick_command.py
@@ -159,8 +159,6 @@ class ExerciseAddbrickCommand(GlusterBaseClass):
self.assertTrue(validate_io_procs([proc], [mount_obj]),
'IO Failed on client %s:%s' %
(mount_obj.client_system, mount_obj.mountpoint))
- g.log.info("IO is successful on mount point %s:%s",
- mount_obj.client_system, mount_obj.mountpoint)
g.log.debug("Unmounting mount points")
self.assertTrue(self.unmount_volume(self.mounts),
diff --git a/tests/functional/dht/test_rebalance_with_hidden_files.py b/tests/functional/dht/test_rebalance_with_hidden_files.py
index 6e51f0523..d3357bcfb 100644
--- a/tests/functional/dht/test_rebalance_with_hidden_files.py
+++ b/tests/functional/dht/test_rebalance_with_hidden_files.py
@@ -106,10 +106,10 @@ class RebalanceValidation(GlusterBaseClass):
self.all_mounts_procs.append(proc)
# validate IO
- g.log.info("Validating IO on mounts")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# List all files and dirs created
g.log.info("List all files and directories:")
diff --git a/tests/functional/glusterd/test_brick_log_messages.py b/tests/functional/glusterd/test_brick_log_messages.py
index b21ab6770..d1f8f2fb5 100644
--- a/tests/functional/glusterd/test_brick_log_messages.py
+++ b/tests/functional/glusterd/test_brick_log_messages.py
@@ -118,9 +118,10 @@ class TestAddBrickFunctionality(GlusterBaseClass):
self.counter = self.counter + 10
# Validate IO
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# Getting timestamp
_, timestamp, _ = g.run_local('date +%s')
diff --git a/tests/functional/glusterd/test_create_vol_with_used_bricks.py b/tests/functional/glusterd/test_create_vol_with_used_bricks.py
index edd477b6f..940d7a451 100644
--- a/tests/functional/glusterd/test_create_vol_with_used_bricks.py
+++ b/tests/functional/glusterd/test_create_vol_with_used_bricks.py
@@ -134,9 +134,10 @@ class TestCreateVolWithUsedBricks(GlusterBaseClass):
self.counter = self.counter + 10
# Validate IO
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# Unmount volume
ret = self.unmount_volume(self.mounts)
diff --git a/tests/functional/glusterd/test_readonly_option_on_volume.py b/tests/functional/glusterd/test_readonly_option_on_volume.py
index 09e0b4378..064d1ac8a 100644
--- a/tests/functional/glusterd/test_readonly_option_on_volume.py
+++ b/tests/functional/glusterd/test_readonly_option_on_volume.py
@@ -114,10 +114,11 @@ class TestReadOnlyOptionOnVolume(GlusterBaseClass):
self.counter = self.counter + 10
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertFalse(ret, "IO should fail on mount points of readonly "
- "volumes but IO success")
+ self.assertFalse(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO should fail on mount points of readonly "
+ "volumes but IO succeeded"
+ )
g.log.info("IO failed on mount points of read only volumes "
"as expected")
@@ -150,7 +151,7 @@ class TestReadOnlyOptionOnVolume(GlusterBaseClass):
self.counter = self.counter + 10
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
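For the read-only volume the check is inverted: a True return from validate_io_procs would mean writes unexpectedly succeeded. A minimal sketch of that expected-failure assertion, using the same names as the hunk above and assuming the volume has already been made read-only earlier in the test:

    # Writes are expected to fail on a read-only volume, so a True result
    # from validate_io_procs is the error condition here.
    self.assertFalse(
        validate_io_procs(self.all_mounts_procs, self.mounts),
        "IO should fail on mount points of readonly "
        "volumes but IO succeeded"
    )
    g.log.info("IO failed on mount points of read only volumes "
               "as expected")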
diff --git a/tests/functional/glusterd/test_rebalance_hang.py b/tests/functional/glusterd/test_rebalance_hang.py
index 8dde6d7d5..d96a4043a 100644
--- a/tests/functional/glusterd/test_rebalance_hang.py
+++ b/tests/functional/glusterd/test_rebalance_hang.py
@@ -152,11 +152,9 @@ class TestRebalanceHang(GlusterBaseClass):
self.all_mounts_procs.append(proc)
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
ret = validate_io_procs(self.all_mounts_procs, self.mounts)
self.io_validation_complete = True
self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
g.log.info("Starting rebalance with force on the volume")
ret, _, _ = rebalance_start(self.mnode, self.volname, False, True)
diff --git a/tests/functional/glusterd/test_remove_brick_after_restart_glusterd.py b/tests/functional/glusterd/test_remove_brick_after_restart_glusterd.py
index e1d7b3522..217eae5dc 100644
--- a/tests/functional/glusterd/test_remove_brick_after_restart_glusterd.py
+++ b/tests/functional/glusterd/test_remove_brick_after_restart_glusterd.py
@@ -150,11 +150,9 @@ class TestRemoveBrickAfterRestartGlusterd(GlusterBaseClass):
self.all_mounts_procs.append(proc)
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
ret = validate_io_procs(self.all_mounts_procs, self.mounts)
self.io_validation_complete = True
self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
remove_brick_list = bricks_list[2:4]
ret, _, _ = remove_brick(self.mnode, self.volname, remove_brick_list,
diff --git a/tests/functional/glusterd/test_restart_glusterd_while_rebalancing.py b/tests/functional/glusterd/test_restart_glusterd_while_rebalancing.py
index 2f68dbf38..22aee4e3e 100644
--- a/tests/functional/glusterd/test_restart_glusterd_while_rebalancing.py
+++ b/tests/functional/glusterd/test_restart_glusterd_while_rebalancing.py
@@ -125,10 +125,10 @@ class TestRestartGlusterdWhileRebalance(GlusterBaseClass):
self.counter = self.counter + 10
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# Forming brick list
brick_list = form_bricks_list_to_add_brick(
diff --git a/tests/functional/glusterd/test_volume_status.py b/tests/functional/glusterd/test_volume_status.py
index acfceb23b..ff5d5752f 100644
--- a/tests/functional/glusterd/test_volume_status.py
+++ b/tests/functional/glusterd/test_volume_status.py
@@ -158,11 +158,9 @@ class VolumeStatusWhenIOInProgress(GlusterBaseClass):
"'inode' of volume %s", self.volname)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
ret = validate_io_procs(self.all_mounts_procs, self.mounts)
self.io_validation_complete = True
self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# List all files and dirs created
g.log.info("List all files and directories:")
diff --git a/tests/functional/glusterd/test_volume_status_fd.py b/tests/functional/glusterd/test_volume_status_fd.py
index f06369633..2765325c5 100644
--- a/tests/functional/glusterd/test_volume_status_fd.py
+++ b/tests/functional/glusterd/test_volume_status_fd.py
@@ -140,10 +140,11 @@ class VolumeStatusFdWhenIOInProgress(GlusterBaseClass):
count += 1
# Validate IO
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# List all files and dirs created
g.log.info("List all files and directories:")
diff --git a/tests/functional/nfs_ganesha/test_nfs_ganesha_volume_exports.py b/tests/functional/nfs_ganesha/test_nfs_ganesha_volume_exports.py
index 06cd221ba..8a8b28cc3 100644
--- a/tests/functional/nfs_ganesha/test_nfs_ganesha_volume_exports.py
+++ b/tests/functional/nfs_ganesha/test_nfs_ganesha_volume_exports.py
@@ -187,11 +187,9 @@ class TestNfsGaneshaVolumeExportsWithIO(NfsGaneshaIOBaseClass):
time.sleep(2)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
ret = validate_io_procs(self.all_mounts_procs, self.mounts)
self.io_validation_complete = True
self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# List all files and dirs created
g.log.info("List all files and directories:")
@@ -276,11 +274,9 @@ class TestNfsGaneshaMultiVolumeExportsWithIO(NfsGaneshaIOBaseClass):
% self.volume['name'])
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
ret = validate_io_procs(self.all_mounts_procs, self.mounts)
self.io_validation_complete = True
self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# List all files and dirs created
g.log.info("List all files and directories:")
@@ -369,13 +365,11 @@ class TestNfsGaneshaSubDirExportsWithIO(NfsGaneshaIOBaseClass):
time.sleep(15)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
ret = validate_io_procs(self.all_mounts_procs, self.mounts)
self.io_validation_complete = True
if not ret:
g.log.error("IO failed on some of the clients")
return False
- g.log.info("IO is successful on all mounts")
# List all files and dirs created
g.log.info("List all files and directories:")
@@ -394,12 +388,10 @@ class TestNfsGaneshaSubDirExportsWithIO(NfsGaneshaIOBaseClass):
NfsGaneshaIOBaseClass.setUp.im_func(self)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
ret = validate_io_procs(self.all_mounts_procs, self.mounts)
self.io_validation_complete = True
if not ret:
raise ExecutionError("IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# List all files and dirs created
g.log.info("List all files and directories:")
diff --git a/tests/functional/quota/test_limit_usage_deep_dir.py b/tests/functional/quota/test_limit_usage_deep_dir.py
index f7fcf2912..f066441e2 100644
--- a/tests/functional/quota/test_limit_usage_deep_dir.py
+++ b/tests/functional/quota/test_limit_usage_deep_dir.py
@@ -118,10 +118,10 @@ class LimitUsageDeepDir(GlusterBaseClass):
mount_object.client_system, mount_object.mountpoint)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# Set soft timeout to 1 second
g.log.info("Set quota soft timeout:")
diff --git a/tests/functional/quota/test_quota_limit_dir_breadth.py b/tests/functional/quota/test_quota_limit_dir_breadth.py
index 76747732f..b70c1187f 100755
--- a/tests/functional/quota/test_quota_limit_dir_breadth.py
+++ b/tests/functional/quota/test_quota_limit_dir_breadth.py
@@ -107,10 +107,10 @@ class QuotaLimitDirBreadth(GlusterBaseClass):
self.all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# Get dir list
g.log.info('Getting dir list in %s', self.volname)
diff --git a/tests/functional/snapshot/test_mount_snap.py b/tests/functional/snapshot/test_mount_snap.py
index fa56572c5..e01be9dd8 100644
--- a/tests/functional/snapshot/test_mount_snap.py
+++ b/tests/functional/snapshot/test_mount_snap.py
@@ -93,10 +93,10 @@ class TestSnapMountSnapshot(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate I/O
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# Creating snapshot
g.log.info("Starting to create snapshots")
@@ -157,10 +157,10 @@ class TestSnapMountSnapshot(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate I/O
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# start I/O
g.log.info("Starting IO on all mounts...")
@@ -175,9 +175,10 @@ class TestSnapMountSnapshot(GlusterBaseClass):
all_mounts_procs.append(proc)
# validate io should fail
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertFalse(ret, "Unexpected: IO Successfull on all clients")
+ self.assertFalse(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Unexpected: IO Successfull on all clients"
+ )
g.log.info("Expected: IO failed on clients")
def tearDown(self):
diff --git a/tests/functional/snapshot/test_restore_online_vol.py b/tests/functional/snapshot/test_restore_online_vol.py
index 9df2ddefa..023e9ead4 100644
--- a/tests/functional/snapshot/test_restore_online_vol.py
+++ b/tests/functional/snapshot/test_restore_online_vol.py
@@ -118,11 +118,11 @@ class SnapRSOnline(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("I/O successful on clients")
# Get stat of all the files/dirs created.
g.log.info("Get stat of all the files/dirs created.")
diff --git a/tests/functional/snapshot/test_snap_delete_existing_scheduler.py b/tests/functional/snapshot/test_snap_delete_existing_scheduler.py
index bce21eaec..436e7f214 100644
--- a/tests/functional/snapshot/test_snap_delete_existing_scheduler.py
+++ b/tests/functional/snapshot/test_snap_delete_existing_scheduler.py
@@ -223,10 +223,10 @@ class SnapshotDeleteExistingScheduler(GlusterBaseClass):
self.job_name)
# Validate IO
- g.log.info("Validating IO on mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# scheduler list (no active jobs should be there)
g.log.info("Starting to list all scheduler jobs")
diff --git a/tests/functional/snapshot/test_snap_delete_original_volume.py b/tests/functional/snapshot/test_snap_delete_original_volume.py
index 97daed868..1bf62fd87 100644
--- a/tests/functional/snapshot/test_snap_delete_original_volume.py
+++ b/tests/functional/snapshot/test_snap_delete_original_volume.py
@@ -97,11 +97,11 @@ class SnapshotSelfheal(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Creating snapshot
g.log.info("Starting to Create snapshot")
diff --git a/tests/functional/snapshot/test_snap_uss.py b/tests/functional/snapshot/test_snap_uss.py
index dcc195fe3..aad6d897c 100644
--- a/tests/functional/snapshot/test_snap_uss.py
+++ b/tests/functional/snapshot/test_snap_uss.py
@@ -97,11 +97,10 @@ class SnapshotUssSnap(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Validating IO on mounts")
- g.log.info("%s", all_mounts_procs)
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# starting I/O
g.log.info("Starting IO on all mounts...")
diff --git a/tests/functional/snapshot/test_snap_uss_while_io.py b/tests/functional/snapshot/test_snap_uss_while_io.py
index 990fadd17..e8435c579 100644
--- a/tests/functional/snapshot/test_snap_uss_while_io.py
+++ b/tests/functional/snapshot/test_snap_uss_while_io.py
@@ -164,11 +164,11 @@ class SnapshotUssWhileIo(GlusterBaseClass):
g.log.info("snapshot %s activated successfully", self.snap)
# Validate IO is completed
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# validate snapshots are listed under .snaps directory
g.log.info("Validating snaps under .snaps")
diff --git a/tests/functional/snapshot/test_uss_brick_down.py b/tests/functional/snapshot/test_uss_brick_down.py
index bb0dadfde..94b48c043 100644
--- a/tests/functional/snapshot/test_uss_brick_down.py
+++ b/tests/functional/snapshot/test_uss_brick_down.py
@@ -130,11 +130,11 @@ class SnapUssBrickDown(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("I/O successful on clients")
# Bring down 1 brick from brick list
g.log.info("Getting all the bricks of the volume")
diff --git a/tests/functional/snapshot/test_uss_snap_active_deactive.py b/tests/functional/snapshot/test_uss_snap_active_deactive.py
index db7f8ab73..40832e7cc 100644
--- a/tests/functional/snapshot/test_uss_snap_active_deactive.py
+++ b/tests/functional/snapshot/test_uss_snap_active_deactive.py
@@ -137,11 +137,11 @@ class SnapUssActiveD(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("I/O successful on clients")
# Enable USS
g.log.info("Enable USS on volume")
diff --git a/tests/functional/snapshot/test_validate_snaps_dir_over_uss.py b/tests/functional/snapshot/test_validate_snaps_dir_over_uss.py
index 5b91f8b40..8fcce0096 100644
--- a/tests/functional/snapshot/test_validate_snaps_dir_over_uss.py
+++ b/tests/functional/snapshot/test_validate_snaps_dir_over_uss.py
@@ -139,11 +139,11 @@ class TestValidateUss(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("I/O successful on clients")
# get the snapshot list.
snap_list = get_snap_list(self.mnode)
@@ -212,8 +212,10 @@ class TestValidateUss(GlusterBaseClass):
# IO should fail
g.log.info("IO should Fail with ROFS error.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertFalse(ret, "Unexpected: IO successfully completed")
+ self.assertFalse(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Unexpected: IO successfully completed"
+ )
g.log.info("Expected: IO failed to complete")
# validate snap-0 present in mountpoint