author    Yaniv Kaul <ykaul@redhat.com>    2018-07-03 00:45:30 +0300
committer Nigel Babu <nigelb@redhat.com>   2018-07-17 04:14:01 +0000
commit    02dbb7a68f828863e5b71dc15488e665d484ab6e (patch)
tree      4ae10586c3f26f9e73a6d533bbd4af88094c6ef5 /tests/functional/afr
parent    87f9679588c54c550447acdc8f0cc15626c7d881 (diff)
Shorten all the logs around verify_io_procs
No functional change, just make the tests a bit more readable.
It could be moved to a decorator later on, wrapping tests.

Change-Id: I484bb8b46907ee8f33dfcf4c960737a21819cd6a
Signed-off-by: Yaniv Kaul <ykaul@redhat.com>
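As a rough illustration of the decorator idea mentioned in the commit message (not part of this change), a minimal sketch might look like the following. The decorator name validate_io and its placement are assumptions for illustration only; validate_io_procs is the existing glusto-tests helper used throughout the diff below.

    from functools import wraps

    from glustolibs.io.utils import validate_io_procs


    def validate_io(test_method):
        """Hypothetical decorator: run the wrapped test method, then
        validate the IO procs it collected in self.all_mounts_procs."""
        @wraps(test_method)
        def wrapper(self, *args, **kwargs):
            result = test_method(self, *args, **kwargs)
            # Same assertion pattern this commit introduces inline in each test
            self.assertTrue(
                validate_io_procs(self.all_mounts_procs, self.mounts),
                "IO failed on some of the clients"
            )
            self.io_validation_complete = True
            return result
        return wrapper

A test would then simply carry @validate_io above its definition; tests that validate IO several times mid-run (as most of the files changed below do) would still need the inline form, which is why this commit keeps the assertions in place and only shortens them.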
Diffstat (limited to 'tests/functional/afr')
-rw-r--r--  tests/functional/afr/heal/test_heal_info_while_accessing_file.py                 |  16
-rwxr-xr-x  tests/functional/afr/heal/test_self_heal.py                                      | 128
-rw-r--r--  tests/functional/afr/heal/test_self_heal_daemon_process.py                       |  16
-rw-r--r--  tests/functional/afr/test_afr_cli_gfid_splitbrain.py                             |  16
-rw-r--r--  tests/functional/afr/test_client_side_quorum.py                                  | 376
-rw-r--r--  tests/functional/afr/test_conservative_merge_of_files_heal_command.py            |  16
-rwxr-xr-x  tests/functional/afr/test_dist_to_repl_automatic_heal_should_be_triggered.py     |  16
-rwxr-xr-x  tests/functional/afr/test_heal_command_unsuccessful_as_bricks_down.py            |   8
-rw-r--r--  tests/functional/afr/test_heal_fail_1x3.py                                       |  16
-rw-r--r--  tests/functional/afr/test_heal_info_should_have_fixed_fields.py                  |   8
-rwxr-xr-x  tests/functional/afr/test_manual_heal_should_trigger_heal.py                     |   8
-rw-r--r--  tests/functional/afr/test_multiple_clients_dd_on_same_file_default.py            |  16
-rwxr-xr-x  tests/functional/afr/test_shd_should_not_crash_executed_heal_info.py             |  24
-rwxr-xr-x  tests/functional/afr/test_volume_set_options.py                                  |  16
-rwxr-xr-x  tests/functional/afr/test_write_io_mount_point_resumed_quorum_restored.py        |  32
15 files changed, 356 insertions, 356 deletions
diff --git a/tests/functional/afr/heal/test_heal_info_while_accessing_file.py b/tests/functional/afr/heal/test_heal_info_while_accessing_file.py
index 965adbdc1..2fa7b194c 100644
--- a/tests/functional/afr/heal/test_heal_info_while_accessing_file.py
+++ b/tests/functional/afr/heal/test_heal_info_while_accessing_file.py
@@ -160,11 +160,11 @@ class TestSelfHeal(GlusterBaseClass):
self.all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# Get entries before accessing file
g.log.info("Getting entries_before_accessing file...")
@@ -221,8 +221,8 @@ class TestSelfHeal(GlusterBaseClass):
'finished successfully.')
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
diff --git a/tests/functional/afr/heal/test_self_heal.py b/tests/functional/afr/heal/test_self_heal.py
index d7506a654..36bdb9948 100755
--- a/tests/functional/afr/heal/test_self_heal.py
+++ b/tests/functional/afr/heal/test_self_heal.py
@@ -183,11 +183,11 @@ class TestSelfHeal(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Get arequal before getting bricks offline
g.log.info('Getting arequal before getting bricks offline...')
@@ -255,11 +255,11 @@ class TestSelfHeal(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Bring brick online
g.log.info('Bringing bricks %s online...', bricks_to_bring_offline)
@@ -399,11 +399,11 @@ class TestSelfHeal(GlusterBaseClass):
g.log.info("All self-heal-daemons are online")
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
def test_entry_self_heal_heal_command(self):
"""
@@ -470,11 +470,11 @@ class TestSelfHeal(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Command list to do different operations with data -
# create, rename, copy and delete
@@ -550,11 +550,11 @@ class TestSelfHeal(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Get arequal before getting bricks online
g.log.info('Getting arequal before getting bricks online...')
@@ -707,11 +707,11 @@ class TestSelfHeal(GlusterBaseClass):
self.all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# Setting options
g.log.info('Setting options...')
@@ -754,11 +754,11 @@ class TestSelfHeal(GlusterBaseClass):
self.all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# Get arequal before getting bricks online
g.log.info('Getting arequal before getting bricks online...')
@@ -891,11 +891,11 @@ class TestSelfHeal(GlusterBaseClass):
self.all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# Select bricks to bring offline
bricks_to_bring_offline_dict = (select_bricks_to_bring_offline(
@@ -931,11 +931,11 @@ class TestSelfHeal(GlusterBaseClass):
self.all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# Get arequal before getting bricks online
g.log.info('Getting arequal before getting bricks online...')
@@ -1039,11 +1039,11 @@ class TestSelfHeal(GlusterBaseClass):
self.all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# Select bricks to bring offline
bricks_to_bring_offline_dict = (select_bricks_to_bring_offline(
@@ -1079,11 +1079,11 @@ class TestSelfHeal(GlusterBaseClass):
self.all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# Get arequal before getting bricks online
g.log.info('Getting arequal before getting bricks online...')
@@ -1200,11 +1200,11 @@ class TestSelfHeal(GlusterBaseClass):
self.all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# Get arequal before getting bricks offline
g.log.info('Getting arequal before getting bricks offline...')
@@ -1271,11 +1271,11 @@ class TestSelfHeal(GlusterBaseClass):
self.all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# Get arequal before getting bricks online
g.log.info('Getting arequal before getting bricks online...')
@@ -1411,11 +1411,11 @@ class TestSelfHeal(GlusterBaseClass):
self.all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# Get arequal before getting bricks offline
g.log.info('Getting arequal before getting bricks offline...')
@@ -1660,11 +1660,11 @@ class TestSelfHeal(GlusterBaseClass):
self.all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# Get arequal before getting bricks online
g.log.info('Getting arequal before getting bricks online...')
@@ -2052,11 +2052,11 @@ class TestMetadataSelfHeal(GlusterBaseClass):
self.all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# Setting options
g.log.info('Setting options...')
diff --git a/tests/functional/afr/heal/test_self_heal_daemon_process.py b/tests/functional/afr/heal/test_self_heal_daemon_process.py
index 8e207c45a..ed71e4f2b 100644
--- a/tests/functional/afr/heal/test_self_heal_daemon_process.py
+++ b/tests/functional/afr/heal/test_self_heal_daemon_process.py
@@ -651,11 +651,11 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
self.volname)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
@runs_on([['replicated', 'distributed-replicated'],
@@ -943,10 +943,10 @@ class SelfHealDaemonProcessTestsWithHealing(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Validating IO on mounts.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# check the heal info
g.log.info("Get the pending heal info for the volume %s",
diff --git a/tests/functional/afr/test_afr_cli_gfid_splitbrain.py b/tests/functional/afr/test_afr_cli_gfid_splitbrain.py
index a886209f5..483e50fb8 100644
--- a/tests/functional/afr/test_afr_cli_gfid_splitbrain.py
+++ b/tests/functional/afr/test_afr_cli_gfid_splitbrain.py
@@ -119,10 +119,10 @@ class TestSelfHeal(GlusterBaseClass):
user=mount_obj.user)
all_mounts_procs.append(proc)
# Validate I/O
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
g.log.info("Successfully created a file from mount point")
g.log.info("bringing brick 1 back online")
@@ -147,10 +147,10 @@ class TestSelfHeal(GlusterBaseClass):
user=mount_obj.user)
all_mounts_procs.append(proc)
# Validate I/O
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
g.log.info("Successfully created a new file of same name "
"from mount point")
diff --git a/tests/functional/afr/test_client_side_quorum.py b/tests/functional/afr/test_client_side_quorum.py
index 6291455de..a3b74a1ea 100644
--- a/tests/functional/afr/test_client_side_quorum.py
+++ b/tests/functional/afr/test_client_side_quorum.py
@@ -129,10 +129,10 @@ class ClientSideQuorumTests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Validating IO on mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# get the subvolumes
g.log.info("Starting to get sub-volumes for volume %s", self.volname)
@@ -268,10 +268,10 @@ class ClientSideQuorumTests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating IO on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# stat on file
g.log.info("stat on file1.txt on all mounts")
@@ -426,10 +426,10 @@ class ClientSideQuorumTests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# get the subvolumes
g.log.info("starting to get subvolumes for volume %s", self.volname)
@@ -465,10 +465,10 @@ class ClientSideQuorumTests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# set cluster.quorum-type to auto
options = {"cluster.quorum-type": "auto"}
@@ -622,10 +622,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Validating IO on mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# get the subvolumes
g.log.info("Starting to get sub-volumes for volume %s", self.volname)
@@ -658,10 +658,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Validating IO on mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# create directory user1
g.log.info("Start creating directory on all mounts...")
@@ -674,10 +674,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Validating IO on mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# create h/w link to file
g.log.info("Start creating hard link for file0.txt on all mounts")
@@ -746,10 +746,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating IO on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# stat on file
g.log.info("stat on file1.txt on all mounts")
@@ -843,10 +843,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# read the file
g.log.info("Start reading files on all mounts")
@@ -859,10 +859,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# get the subvolumes
g.log.info("Starting to get sub-volumes for volume %s", self.volname)
@@ -900,10 +900,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# read the file
g.log.info("Start reading files on all mounts")
@@ -916,10 +916,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# set the cluster.quorum-count to 1
options = {"cluster.quorum-count": "1"}
@@ -943,10 +943,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# read the file
g.log.info("Start reading files on all mounts")
@@ -959,10 +959,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# set the cluster.quorum-count to 2
options = {"cluster.quorum-count": "2"}
@@ -1002,10 +1002,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# bring back the brick1 online for all subvolumes
g.log.info("bringing up the bricks : %s online",
@@ -1030,10 +1030,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# read the file
g.log.info("Start reading files on all mounts")
@@ -1046,10 +1046,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# Bring down brick2 for all the subvolumes
g.log.info("Going to bring down the brick process "
@@ -1089,10 +1089,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# set the cluster.quorum-count to 1
options = {"cluster.quorum-count": "1"}
@@ -1116,10 +1116,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# read the file
g.log.info("Start reading files on all mounts")
@@ -1132,10 +1132,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# set cluster.quorum-type to auto and cluster.quorum-count back to 2
options = {"cluster.quorum-type": "auto",
@@ -1160,10 +1160,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# read the file
g.log.info("Start reading files on all mounts")
@@ -1176,10 +1176,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# Bring back brick2 online for all the subvolumes
g.log.info("bringing up the bricks : %s online",
@@ -1229,10 +1229,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# set the quorum-type to none
options = {"cluster.quorum-type": "none"}
@@ -1256,10 +1256,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# read the file
g.log.info("Start reading files on all mounts")
@@ -1272,10 +1272,10 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
@runs_on([['distributed-replicated'],
@@ -1485,11 +1485,11 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
volumes_to_change_options = ['1', '3']
# set cluster.quorum-type to auto
@@ -1622,10 +1622,10 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Validating IO on mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
@runs_on([['replicated', 'distributed-replicated'], ['glusterfs']])
@@ -1757,10 +1757,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# read the file
g.log.info("Start reading files on all mounts")
@@ -1773,10 +1773,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# get the subvolumes
g.log.info("Starting to get sub-volumes for volume %s", self.volname)
@@ -1813,10 +1813,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# read the file
g.log.info("Start reading files on all mounts")
@@ -1829,10 +1829,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# bring down brick2 for all the subvolumes
offline_brick2_from_replicasets = []
@@ -1863,10 +1863,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# read the file
g.log.info("Start reading files on all mounts")
@@ -1879,10 +1879,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# set the cluster.quorum-count to 1
options = {"cluster.quorum-count": "1"}
@@ -1906,10 +1906,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# read the file
g.log.info("Start reading files on all mounts")
@@ -1922,10 +1922,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# set the cluster.quorum-count to 2
options = {"cluster.quorum-count": "2"}
@@ -1965,10 +1965,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# bring back the brick1 online for all subvolumes
g.log.info("bringing up the brick : %s online",
@@ -1993,10 +1993,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# read the file
g.log.info("Start reading files on all mounts")
@@ -2009,10 +2009,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# Bring back brick2 online
g.log.info("bringing up the brick : %s online",
@@ -2037,10 +2037,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# read the file
g.log.info("Start reading files on all mounts")
@@ -2053,10 +2053,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# set cluster.quorum-type to auto
options = {"cluster.quorum-type": "auto"}
@@ -2080,10 +2080,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# read the file
g.log.info("Start reading files on all mounts")
@@ -2096,10 +2096,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# bring down brick1 and brick2 for all the subvolumes
for i in range(0, num_subvols):
@@ -2144,10 +2144,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# set the cluster.quorum-count to 1
options = {"cluster.quorum-count": "1"}
@@ -2187,10 +2187,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# set the cluster.quorum-count to 3
options = {"cluster.quorum-count": "3"}
@@ -2230,10 +2230,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
# set the quorum-type to none
options = {"cluster.quorum-type": "none"}
@@ -2257,10 +2257,10 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# read the file
g.log.info("Start reading files on all mounts")
@@ -2273,7 +2273,7 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
all_mounts_procs.append(proc)
# Validate IO
- g.log.info("validating reads on all mounts")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "Reads failed on some of the clients")
- g.log.info("Reads successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "Reads failed on some of the clients"
+ )
diff --git a/tests/functional/afr/test_conservative_merge_of_files_heal_command.py b/tests/functional/afr/test_conservative_merge_of_files_heal_command.py
index 2f36635c5..3a6c62069 100644
--- a/tests/functional/afr/test_conservative_merge_of_files_heal_command.py
+++ b/tests/functional/afr/test_conservative_merge_of_files_heal_command.py
@@ -170,11 +170,11 @@ class VerifySelfHealTriggersHealCommand(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Bring brick 0 online
g.log.info('Bringing bricks %s online...', bricks_list[0])
@@ -214,11 +214,11 @@ class VerifySelfHealTriggersHealCommand(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Bring brick 1 online
g.log.info('Bringing bricks %s online...', bricks_list[1])
diff --git a/tests/functional/afr/test_dist_to_repl_automatic_heal_should_be_triggered.py b/tests/functional/afr/test_dist_to_repl_automatic_heal_should_be_triggered.py
index 9aee8aab5..1e9042db2 100755
--- a/tests/functional/afr/test_dist_to_repl_automatic_heal_should_be_triggered.py
+++ b/tests/functional/afr/test_dist_to_repl_automatic_heal_should_be_triggered.py
@@ -131,11 +131,11 @@ class TestSelfHeal(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Get arequal for mount before adding bricks
g.log.info('Getting arequal before adding bricks...')
@@ -240,11 +240,11 @@ class TestSelfHeal(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Bring brick 0 online
g.log.info('Bringing bricks %s online...', bricks_list[0])
diff --git a/tests/functional/afr/test_heal_command_unsuccessful_as_bricks_down.py b/tests/functional/afr/test_heal_command_unsuccessful_as_bricks_down.py
index ec76fd2f7..2e4ddb9a1 100755
--- a/tests/functional/afr/test_heal_command_unsuccessful_as_bricks_down.py
+++ b/tests/functional/afr/test_heal_command_unsuccessful_as_bricks_down.py
@@ -213,8 +213,8 @@ class TestSelfHeal(GlusterBaseClass):
g.log.info('Volume is not in split-brain state')
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
diff --git a/tests/functional/afr/test_heal_fail_1x3.py b/tests/functional/afr/test_heal_fail_1x3.py
index 458203444..596b145a5 100644
--- a/tests/functional/afr/test_heal_fail_1x3.py
+++ b/tests/functional/afr/test_heal_fail_1x3.py
@@ -108,10 +108,10 @@ class TestSelfHeal(GlusterBaseClass):
user=mount_obj.user)
all_mounts_procs.append(proc)
# Validate I/O
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
g.log.info("Successfully created a file from mount point")
# getting list of all bricks
@@ -140,10 +140,10 @@ class TestSelfHeal(GlusterBaseClass):
user=mount_obj.user)
all_mounts_procs.append(proc)
# Validate I/O
- g.log.info("Wait for IO to complete and validate IO.....")
- ret = validate_io_procs(all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
g.log.info("Successfully created a new file of same name "
"from mount point")
diff --git a/tests/functional/afr/test_heal_info_should_have_fixed_fields.py b/tests/functional/afr/test_heal_info_should_have_fixed_fields.py
index 8503888f5..ec9c1d95f 100644
--- a/tests/functional/afr/test_heal_info_should_have_fixed_fields.py
+++ b/tests/functional/afr/test_heal_info_should_have_fixed_fields.py
@@ -142,11 +142,11 @@ class VerifySelfHealTriggersHealCommand(GlusterBaseClass):
bricks_to_bring_offline)
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Bring brick online
g.log.info('Bringing bricks %s online...', bricks_to_bring_offline)
diff --git a/tests/functional/afr/test_manual_heal_should_trigger_heal.py b/tests/functional/afr/test_manual_heal_should_trigger_heal.py
index 5ea312326..04771cc88 100755
--- a/tests/functional/afr/test_manual_heal_should_trigger_heal.py
+++ b/tests/functional/afr/test_manual_heal_should_trigger_heal.py
@@ -126,11 +126,11 @@ class TestSelfHeal(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Get arequal for mount before adding bricks
g.log.info('Getting arequal before adding bricks...')
diff --git a/tests/functional/afr/test_multiple_clients_dd_on_same_file_default.py b/tests/functional/afr/test_multiple_clients_dd_on_same_file_default.py
index 60fcb5952..0d7d82d18 100644
--- a/tests/functional/afr/test_multiple_clients_dd_on_same_file_default.py
+++ b/tests/functional/afr/test_multiple_clients_dd_on_same_file_default.py
@@ -193,17 +193,17 @@ class VerifySelfHealTriggersHealCommand(GlusterBaseClass):
bricks_list[1])
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
# Validate reading
- g.log.info("Wait for reading to complete ...")
- ret = validate_io_procs(all_mounts_procs_read, self.mounts)
- self.assertTrue(ret, "Reading failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs_read, self.mounts),
+ "Reading failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("Reading is successful on all mounts")
# Bring brick1 online
g.log.info('Bringing bricks %s online...', bricks_list[1])
diff --git a/tests/functional/afr/test_shd_should_not_crash_executed_heal_info.py b/tests/functional/afr/test_shd_should_not_crash_executed_heal_info.py
index deb2f39eb..189d70af9 100755
--- a/tests/functional/afr/test_shd_should_not_crash_executed_heal_info.py
+++ b/tests/functional/afr/test_shd_should_not_crash_executed_heal_info.py
@@ -140,11 +140,11 @@ class VerifySelfHealTriggersHealCommand(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Bring brick0 offline
g.log.info('Bringing bricks %s offline', bricks_list[0])
@@ -182,11 +182,11 @@ class VerifySelfHealTriggersHealCommand(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Get heal info
g.log.info("Getting heal info...")
@@ -252,11 +252,11 @@ class VerifySelfHealTriggersHealCommand(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Get heal info
g.log.info("Getting heal info...")
diff --git a/tests/functional/afr/test_volume_set_options.py b/tests/functional/afr/test_volume_set_options.py
index 1d8d6737f..96a93b135 100755
--- a/tests/functional/afr/test_volume_set_options.py
+++ b/tests/functional/afr/test_volume_set_options.py
@@ -153,11 +153,11 @@ class VolumeSetDataSelfHealTests(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Check arequals
# get the subvolumes
@@ -249,11 +249,11 @@ class VolumeSetDataSelfHealTests(GlusterBaseClass):
self.io_validation_complete = False
# Validate IO
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Bring brick online
g.log.info('Bringing bricks %s online...', bricks_to_bring_offline)
diff --git a/tests/functional/afr/test_write_io_mount_point_resumed_quorum_restored.py b/tests/functional/afr/test_write_io_mount_point_resumed_quorum_restored.py
index 4e6020872..96d5d7864 100755
--- a/tests/functional/afr/test_write_io_mount_point_resumed_quorum_restored.py
+++ b/tests/functional/afr/test_write_io_mount_point_resumed_quorum_restored.py
@@ -160,11 +160,11 @@ class ClientSideQuorumRestored(GlusterBaseClass):
# Validate IO
self.io_validation_complete = False
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Do IO and check on subvols with nodes to reboot
subvols_dict = get_subvols(self.mnode, self.volname)
@@ -284,11 +284,11 @@ class ClientSideQuorumRestored(GlusterBaseClass):
# Validate IO
self.io_validation_complete = False
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# Do IO and check on subvols without nodes to reboot
subvols_dict = get_subvols(self.mnode, self.volname)
@@ -347,11 +347,11 @@ class ClientSideQuorumRestored(GlusterBaseClass):
# Validate IO
self.io_validation_complete = False
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")
# check if nodes are online
counter = 0
@@ -554,8 +554,8 @@ class ClientSideQuorumRestored(GlusterBaseClass):
for mounts_procs in all_mounts_procs:
# Validate IO
self.io_validation_complete = False
- g.log.info("Wait for IO to complete and validate IO ...")
- ret = validate_io_procs(mounts_procs, self.mounts)
- self.assertTrue(ret, "IO failed on some of the clients")
+ self.assertTrue(
+ validate_io_procs(mounts_procs, self.mounts),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- g.log.info("IO is successful on all mounts")