Diffstat (limited to 'tests/functional/bvt/test_cvt.py')
-rw-r--r--  tests/functional/bvt/test_cvt.py  110
1 file changed, 1 insertion(+), 109 deletions(-)
diff --git a/tests/functional/bvt/test_cvt.py b/tests/functional/bvt/test_cvt.py
index 417f5461a..dea251256 100644
--- a/tests/functional/bvt/test_cvt.py
+++ b/tests/functional/bvt/test_cvt.py
@@ -76,8 +76,6 @@ class GlusterBasicFeaturesSanityBaseClass(GlusterBaseClass):
cls.get_super_method(cls, 'setUpClass')()
# Upload io scripts for running IO on mounts
- g.log.info("Upload io scripts to clients %s for running IO on "
- "mounts", cls.clients)
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
ret = upload_scripts(cls.clients, cls.script_upload_path)
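A minimal sketch of the setUpClass pattern this hunk belongs to, as it would sit inside the class. The failure branch falls outside the hunk's context window, so the ExecutionError handling below is assumed, as are the glustolibs import paths:

    from glustolibs.gluster.exceptions import ExecutionError  # assumed path
    from glustolibs.misc.misc_libs import upload_scripts      # assumed path

    @classmethod
    def setUpClass(cls):
        cls.get_super_method(cls, 'setUpClass')()
        # Push the IO helper script to every client once per test class
        cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
                                  "file_dir_ops.py")
        if not upload_scripts(cls.clients, cls.script_upload_path):
            raise ExecutionError("Failed to upload IO scripts to clients")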
@@ -119,11 +117,9 @@ class GlusterBasicFeaturesSanityBaseClass(GlusterBaseClass):
self.get_super_method(self, 'setUp')()
# Setup Volume and Mount Volume
- g.log.info("Starting to Setup Volume and Mount Volume")
ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
if not ret:
raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
- g.log.info("Successful in Setup Volume and Mount Volume")
# Temporary code:
# Additional checks to gather information from all
@@ -154,7 +150,7 @@ class GlusterBasicFeaturesSanityBaseClass(GlusterBaseClass):
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
self.all_mounts_procs.append(proc)
- self.counter = self.counter + 10
+ self.counter += 10
self.io_validation_complete = False
# Adding a delay of 15 seconds before test method starts. This
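The surviving `self.counter += 10` is what keeps the parallel writers from colliding: each mount starts its directory tree at a different number. A minimal sketch of the loop around it (the exact file_dir_ops.py arguments are not visible in this hunk and are assumed):

    from glusto.core import Glusto as g

    self.all_mounts_procs = []
    for mount_obj in self.mounts:
        # Disjoint --dirname-start-num per client: 10, 20, 30, ...
        cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
               "--dirname-start-num %d %s" % (self.script_upload_path,
                                              self.counter,
                                              mount_obj.mountpoint))
        proc = g.run_async(mount_obj.client_system, cmd, user=mount_obj.user)
        self.all_mounts_procs.append(proc)
        self.counter += 10
    self.io_validation_complete = False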
@@ -170,26 +166,19 @@ class GlusterBasicFeaturesSanityBaseClass(GlusterBaseClass):
# Wait for IO to complete if io validation is not executed in the
# test method
if not self.io_validation_complete:
- g.log.info("Wait for IO to complete as IO validation did not "
- "succeed in test method")
ret = wait_for_io_to_complete(self.all_mounts_procs, self.mounts)
if not ret:
raise ExecutionError("IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
# List all files and dirs created
- g.log.info("List all files and directories:")
ret = list_all_files_and_dirs_mounts(self.mounts)
if not ret:
raise ExecutionError("Failed to list all files and dirs")
- g.log.info("Listing all files and directories is successful")
# Unmount Volume and Cleanup Volume
- g.log.info("Starting to Unmount Volume and Cleanup Volume")
ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
if not ret:
raise ExecutionError("Failed to Unmount Volume and Cleanup Volume")
- g.log.info("Successful in Unmount Volume and Cleanup Volume")
# Calling GlusterBaseClass tearDown
self.get_super_method(self, 'tearDown')()
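tearDown only blocks on the async IO while `self.io_validation_complete` is False, i.e. when the test body never validated it. The handoff a test method is expected to perform looks roughly like this (validate_io_procs is called exactly this way later in the diff; the flag assignment is assumed from the base-class contract):

    # Inside a test method: reap the IO started in setUp
    ret = validate_io_procs(self.all_mounts_procs, self.mounts)
    self.io_validation_complete = True  # tell tearDown not to wait again
    self.assertTrue(ret, "IO failed on some of the clients")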
@@ -212,7 +201,6 @@ class TestGlusterExpandVolumeSanity(GlusterBasicFeaturesSanityBaseClass):
- validate IO
"""
# Log Volume Info and Status before expanding the volume.
- g.log.info("Logging volume info and Status before expanding volume")
ret = log_volume_info_and_status(self.mnode, self.volname)
self.assertTrue(ret, ("Logging volume info and status failed on "
"volume %s", self.volname))
@@ -220,24 +208,17 @@ class TestGlusterExpandVolumeSanity(GlusterBasicFeaturesSanityBaseClass):
self.volname)
# Expanding volume by adding bricks to the volume when IO is in progress
- g.log.info("Start adding bricks to volume when IO in progress")
ret = expand_volume(self.mnode, self.volname, self.servers,
self.all_servers_info)
self.assertTrue(ret, ("Failed to expand the volume when IO in "
"progress on volume %s", self.volname))
- g.log.info("Expanding volume when IO in progress is successful on "
- "volume %s", self.volname)
# Wait for volume processes to be online
- g.log.info("Wait for volume processes to be online")
ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
self.assertTrue(ret, ("Failed to wait for volume %s processes to "
"be online", self.volname))
- g.log.info("Successful in waiting for volume %s processes to be "
- "online", self.volname)
# Log Volume Info and Status after expanding the volume
- g.log.info("Logging volume info and Status after expanding volume")
ret = log_volume_info_and_status(self.mnode, self.volname)
self.assertTrue(ret, ("Logging volume info and status failed on "
"volume %s", self.volname))
@@ -245,14 +226,11 @@ class TestGlusterExpandVolumeSanity(GlusterBasicFeaturesSanityBaseClass):
self.volname)
# Verify all volume processes are online
- g.log.info("Verifying volume's all process are online")
ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
self.assertTrue(ret, ("Volume %s : All process are not online",
self.volname))
- g.log.info("Volume %s : All process are online", self.volname)
# Start Rebalance
- g.log.info("Starting Rebalance on the volume")
ret, _, _ = rebalance_start(self.mnode, self.volname)
self.assertEqual(ret, 0, ("Failed to start rebalance on the volume "
"%s", self.volname))
@@ -264,7 +242,6 @@ class TestGlusterExpandVolumeSanity(GlusterBasicFeaturesSanityBaseClass):
_, _, _ = rebalance_status(self.mnode, self.volname)
# Wait for rebalance to complete
- g.log.info("Waiting for rebalance to complete")
ret = wait_for_rebalance_to_complete(self.mnode, self.volname,
timeout=1800)
self.assertTrue(ret, ("Rebalance is not yet complete on the volume "
@@ -273,7 +250,6 @@ class TestGlusterExpandVolumeSanity(GlusterBasicFeaturesSanityBaseClass):
self.volname)
# Check Rebalance status after rebalance is complete
- g.log.info("Checking Rebalance status")
ret, _, _ = rebalance_status(self.mnode, self.volname)
self.assertEqual(ret, 0, ("Failed to get rebalance status for the "
"volume %s", self.volname))
@@ -286,10 +262,8 @@ class TestGlusterExpandVolumeSanity(GlusterBasicFeaturesSanityBaseClass):
self.assertTrue(ret, "IO failed on some of the clients")
# List all files and dirs created
- g.log.info("List all files and directories:")
ret = list_all_files_and_dirs_mounts(self.mounts)
self.assertTrue(ret, "Failed to list all files and dirs")
- g.log.info("Listing all files and directories is successful")
@runs_on([['distributed', 'distributed-replicated', 'distributed-dispersed'],
@@ -306,7 +280,6 @@ class TestGlusterShrinkVolumeSanity(GlusterBasicFeaturesSanityBaseClass):
- validate IO
"""
# Log Volume Info and Status before shrinking the volume.
- g.log.info("Logging volume info and Status before shrinking volume")
ret = log_volume_info_and_status(self.mnode, self.volname)
self.assertTrue(ret, ("Logging volume info and status failed on "
"volume %s", self.volname))
@@ -329,7 +302,6 @@ class TestGlusterShrinkVolumeSanity(GlusterBasicFeaturesSanityBaseClass):
g.log.info(ret)
# Shrinking volume by removing bricks from volume when IO is in progress
- g.log.info("Start removing bricks from volume when IO in progress")
ret = shrink_volume(self.mnode, self.volname)
# Temporary code:
@@ -353,15 +325,11 @@ class TestGlusterShrinkVolumeSanity(GlusterBasicFeaturesSanityBaseClass):
"volume %s", self.volname)
# Wait for volume processes to be online
- g.log.info("Wait for volume processes to be online")
ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
self.assertTrue(ret, ("Failed to wait for volume %s processes to "
"be online", self.volname))
- g.log.info("Successful in waiting for volume %s processes to be "
- "online", self.volname)
# Log Volume Info and Status after shrinking the volume
- g.log.info("Logging volume info and Status after shrinking volume")
ret = log_volume_info_and_status(self.mnode, self.volname)
self.assertTrue(ret, ("Logging volume info and status failed on "
"volume %s", self.volname))
@@ -369,13 +337,9 @@ class TestGlusterShrinkVolumeSanity(GlusterBasicFeaturesSanityBaseClass):
self.volname)
# Verify all volume processes are online
- g.log.info("Verifying volume's all process are online after "
- "shrinking volume")
ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
self.assertTrue(ret, ("Volume %s : All process are not online",
self.volname))
- g.log.info("Volume %s : All process are online after shrinking volume",
- self.volname)
# Validate IO
ret = validate_io_procs(self.all_mounts_procs, self.mounts)
@@ -383,10 +347,8 @@ class TestGlusterShrinkVolumeSanity(GlusterBasicFeaturesSanityBaseClass):
self.assertTrue(ret, "IO failed on some of the clients")
# List all files and dirs created
- g.log.info("List all files and directories:")
ret = list_all_files_and_dirs_mounts(self.mounts)
self.assertTrue(ret, "Failed to list all files and dirs")
- g.log.info("Listing all files and directories is successful")
@runs_on([['replicated', 'distributed', 'distributed-replicated',
@@ -409,14 +371,11 @@ class TestGlusterVolumeSetSanity(GlusterBasicFeaturesSanityBaseClass):
volume_options_list = ["features.uss", "features.shard"]
# enable and validate the volume options
- g.log.info("Setting the volume options: %s", volume_options_list)
ret = enable_and_validate_volume_options(self.mnode, self.volname,
volume_options_list,
time_delay=30)
self.assertTrue(ret, ("Unable to enable the volume options: %s",
volume_options_list))
- g.log.info("Successfully enabled all the volume options: %s",
- volume_options_list)
# Validate IO
ret = validate_io_procs(self.all_mounts_procs, self.mounts)
@@ -424,10 +383,8 @@ class TestGlusterVolumeSetSanity(GlusterBasicFeaturesSanityBaseClass):
self.assertTrue(ret, "IO failed on some of the clients")
# List all files and dirs created
- g.log.info("List all files and directories:")
ret = list_all_files_and_dirs_mounts(self.mounts)
self.assertTrue(ret, "Failed to list all files and dirs")
- g.log.info("Listing all files and directories is successful")
@runs_on([['replicated', 'distributed', 'distributed-replicated',
@@ -442,14 +399,12 @@ class TestQuotaSanity(GlusterBasicFeaturesSanityBaseClass):
in progress.
"""
# Enable Quota
- g.log.info("Enabling quota on the volume %s", self.volname)
ret, _, _ = quota_enable(self.mnode, self.volname)
self.assertEqual(ret, 0, ("Failed to enable quota on the volume %s",
self.volname))
g.log.info("Successfully enabled quota on the volume %s", self.volname)
# Check if quota is enabled
- g.log.info("Validate Quota is enabled on the volume %s", self.volname)
ret = is_quota_enabled(self.mnode, self.volname)
self.assertTrue(ret, ("Quota is not enabled on the volume %s",
self.volname))
@@ -460,8 +415,6 @@ class TestQuotaSanity(GlusterBasicFeaturesSanityBaseClass):
path = "/"
# Set Quota limit on the root of the volume
- g.log.info("Set Quota Limit on the path %s of the volume %s",
- path, self.volname)
ret, _, _ = quota_limit_usage(self.mnode, self.volname,
path=path, limit="1GB")
self.assertEqual(ret, 0, ("Failed to set quota limit on path %s of "
@@ -470,8 +423,6 @@ class TestQuotaSanity(GlusterBasicFeaturesSanityBaseClass):
path, self.volname)
# quota_fetch_list
- g.log.info("Get Quota list for path %s of the volume %s",
- path, self.volname)
quota_list = quota_fetch_list(self.mnode, self.volname, path=path)
self.assertIsNotNone(quota_list, ("Failed to get the quota list for "
"path %s of the volume %s",
@@ -484,7 +435,6 @@ class TestQuotaSanity(GlusterBasicFeaturesSanityBaseClass):
"volume %s", path, quota_list, self.volname)
# Disable quota
- g.log.info("Disable quota on the volume %s", self.volname)
ret, _, _ = quota_disable(self.mnode, self.volname)
self.assertEqual(ret, 0, ("Failed to disable quota on the volume %s",
self.volname))
@@ -492,7 +442,6 @@ class TestQuotaSanity(GlusterBasicFeaturesSanityBaseClass):
self.volname)
# Check if quota is still enabled (expected: Disabled)
- g.log.info("Validate Quota is enabled on the volume %s", self.volname)
ret = is_quota_enabled(self.mnode, self.volname)
self.assertFalse(ret, ("Quota is still enabled on the volume %s "
"(expected: Disable) ", self.volname))
@@ -500,14 +449,12 @@ class TestQuotaSanity(GlusterBasicFeaturesSanityBaseClass):
self.volname)
# Enable Quota
- g.log.info("Enabling quota on the volume %s", self.volname)
ret, _, _ = quota_enable(self.mnode, self.volname)
self.assertEqual(ret, 0, ("Failed to enable quota on the volume %s",
self.volname))
g.log.info("Successfully enabled quota on the volume %s", self.volname)
# Check if quota is enabled
- g.log.info("Validate Quota is enabled on the volume %s", self.volname)
ret = is_quota_enabled(self.mnode, self.volname)
self.assertTrue(ret, ("Quota is not enabled on the volume %s",
self.volname))
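The quota test is an enable → verify → limit → list → disable → re-enable round trip. A minimal sketch of the first half, assuming the quota_ops import path and that quota_fetch_list returns a per-path dict (None on failure):

    from glustolibs.gluster.quota_ops import (
        quota_enable, quota_limit_usage, quota_fetch_list, is_quota_enabled)

    ret, _, _ = quota_enable(self.mnode, self.volname)
    self.assertEqual(ret, 0, "Failed to enable quota")
    self.assertTrue(is_quota_enabled(self.mnode, self.volname))

    # 1GB hard limit on the volume root
    ret, _, _ = quota_limit_usage(self.mnode, self.volname,
                                  path="/", limit="1GB")
    self.assertEqual(ret, 0, "Failed to set quota limit on /")

    # Assumed shape: dict keyed by path, with limit/usage fields per entry
    quota_list = quota_fetch_list(self.mnode, self.volname, path="/")
    self.assertIsNotNone(quota_list, "Failed to get the quota list")
    self.assertIn("/", quota_list)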
@@ -515,8 +462,6 @@ class TestQuotaSanity(GlusterBasicFeaturesSanityBaseClass):
self.volname)
# quota_fetch_list
- g.log.info("Get Quota list for path %s of the volume %s",
- path, self.volname)
quota_list = quota_fetch_list(self.mnode, self.volname, path=path)
self.assertIsNotNone(quota_list, ("Failed to get the quota list for "
"path %s of the volume %s",
@@ -534,10 +479,8 @@ class TestQuotaSanity(GlusterBasicFeaturesSanityBaseClass):
self.assertTrue(ret, "IO failed on some of the clients")
# List all files and dirs created
- g.log.info("List all files and directories:")
ret = list_all_files_and_dirs_mounts(self.mounts)
self.assertTrue(ret, "Failed to list all files and dirs")
- g.log.info("Listing all files and directories is successful")
@runs_on([['replicated', 'distributed', 'distributed-replicated',
@@ -554,8 +497,6 @@ class TestSnapshotSanity(GlusterBasicFeaturesSanityBaseClass):
"""
snap_name = "snap_cvt"
# Create Snapshot
- g.log.info("Creating snapshot %s of the volume %s",
- snap_name, self.volname)
ret, _, _ = snap_create(self.mnode, self.volname, snap_name)
self.assertEqual(ret, 0, ("Failed to create snapshot with name %s "
" of the volume %s", snap_name,
@@ -564,8 +505,6 @@ class TestSnapshotSanity(GlusterBasicFeaturesSanityBaseClass):
snap_name, self.volname)
# List Snapshot
- g.log.info("Listing the snapshot created for the volume %s",
- self.volname)
snap_list = get_snap_list(self.mnode)
self.assertIsNotNone(snap_list, "Unable to get the Snapshot list")
self.assertIn(snap_name, snap_list,
@@ -574,8 +513,6 @@ class TestSnapshotSanity(GlusterBasicFeaturesSanityBaseClass):
snap_name)
# Activate the snapshot
- g.log.info("Activating snapshot %s of the volume %s",
- snap_name, self.volname)
ret, _, _ = snap_activate(self.mnode, snap_name)
self.assertEqual(ret, 0, ("Failed to activate snapshot with name %s "
" of the volume %s", snap_name,
@@ -587,8 +524,6 @@ class TestSnapshotSanity(GlusterBasicFeaturesSanityBaseClass):
uss_options = ["features.uss"]
if self.mount_type == "cifs":
uss_options.append("features.show-snapshot-directory")
- g.log.info("Enable uss options %s on the volume %s", uss_options,
- self.volname)
ret = enable_and_validate_volume_options(self.mnode, self.volname,
uss_options,
time_delay=30)
@@ -598,14 +533,11 @@ class TestSnapshotSanity(GlusterBasicFeaturesSanityBaseClass):
uss_options, self.volname)
# Viewing snapshot from mount
- g.log.info("Viewing Snapshot %s from mounts:", snap_name)
ret = view_snaps_from_mount(self.mounts, snap_name)
self.assertTrue(ret, ("Failed to View snap %s from mounts", snap_name))
g.log.info("Successfully viewed snap %s from mounts", snap_name)
# De-Activate the snapshot
- g.log.info("Deactivating snapshot %s of the volume %s",
- snap_name, self.volname)
ret, _, _ = snap_deactivate(self.mnode, snap_name)
self.assertEqual(ret, 0, ("Failed to deactivate snapshot with name %s "
" of the volume %s", snap_name,
@@ -615,8 +547,6 @@ class TestSnapshotSanity(GlusterBasicFeaturesSanityBaseClass):
# Viewing snapshot from mount (.snaps shouldn't be listed from mount)
for mount_obj in self.mounts:
- g.log.info("Viewing Snapshot %s from mount %s:%s", snap_name,
- mount_obj.client_system, mount_obj.mountpoint)
ret = view_snaps_from_mount(mount_obj, snap_name)
self.assertFalse(ret, ("Still able to View snap %s from mount "
"%s:%s", snap_name,
@@ -625,8 +555,6 @@ class TestSnapshotSanity(GlusterBasicFeaturesSanityBaseClass):
g.log.info("%s not listed under .snaps from mount %s:%s",
snap_name, mount_obj.client_system,
mount_obj.mountpoint)
- g.log.info("%s not listed under .snaps from mounts after "
- "deactivating ", snap_name)
# Validate IO
ret = validate_io_procs(self.all_mounts_procs, self.mounts)
@@ -634,10 +562,8 @@ class TestSnapshotSanity(GlusterBasicFeaturesSanityBaseClass):
self.assertTrue(ret, "IO failed on some of the clients")
# List all files and dirs created
- g.log.info("List all files and directories:")
ret = list_all_files_and_dirs_mounts(self.mounts)
self.assertTrue(ret, "Failed to list all files and dirs")
- g.log.info("Listing all files and directories is successful")
@runs_on([['replicated', 'distributed-replicated'],
@@ -655,8 +581,6 @@ class TestGlusterReplaceBrickSanity(GlusterBasicFeaturesSanityBaseClass):
- validate IO
"""
# Log Volume Info and Status before replacing brick from the volume.
- g.log.info("Logging volume info and Status before replacing brick "
- "from the volume %s", self.volname)
ret = log_volume_info_and_status(self.mnode, self.volname)
self.assertTrue(ret, ("Logging volume info and status failed on "
"volume %s", self.volname))
@@ -664,23 +588,17 @@ class TestGlusterReplaceBrickSanity(GlusterBasicFeaturesSanityBaseClass):
self.volname)
# Replace brick from a sub-volume
- g.log.info("Replace a faulty brick from the volume")
ret = replace_brick_from_volume(self.mnode, self.volname,
self.servers, self.all_servers_info)
self.assertTrue(ret, "Failed to replace faulty brick from the volume")
g.log.info("Successfully replaced faulty brick from the volume")
# Wait for volume processes to be online
- g.log.info("Wait for volume processes to be online")
ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
self.assertTrue(ret, ("Failed to wait for volume %s processes to "
"be online", self.volname))
- g.log.info("Successful in waiting for volume %s processes to be "
- "online", self.volname)
# Log Volume Info and Status after replacing the brick
- g.log.info("Logging volume info and Status after replacing brick "
- "from the volume %s", self.volname)
ret = log_volume_info_and_status(self.mnode, self.volname)
self.assertTrue(ret, ("Logging volume info and status failed on "
"volume %s", self.volname))
@@ -688,20 +606,16 @@ class TestGlusterReplaceBrickSanity(GlusterBasicFeaturesSanityBaseClass):
self.volname)
# Verify all volume processes are online
- g.log.info("Verifying volume's all process are online")
ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
self.assertTrue(ret, ("Volume %s : All process are not online",
self.volname))
- g.log.info("Volume %s : All process are online", self.volname)
# Wait for self-heal to complete
- g.log.info("Wait for self-heal to complete")
ret = monitor_heal_completion(self.mnode, self.volname,
timeout_period=1800)
self.assertTrue(ret, "Self heal didn't complete even after waiting "
"for 30 minutes. 30 minutes is too much a time for "
"current test workload")
- g.log.info("self-heal is successful after replace-brick operation")
# Validate IO
ret = validate_io_procs(self.all_mounts_procs, self.mounts)
@@ -709,10 +623,8 @@ class TestGlusterReplaceBrickSanity(GlusterBasicFeaturesSanityBaseClass):
self.assertTrue(ret, "IO failed on some of the clients")
# List all files and dirs created
- g.log.info("List all files and directories:")
ret = list_all_files_and_dirs_mounts(self.mounts)
self.assertTrue(ret, "Failed to list all files and dirs")
- g.log.info("Listing all files and directories is successful")
# This test is disabled on nfs because of bug 1473668. A patch to apply the
@@ -753,8 +665,6 @@ class TestGlusterHealSanity(GlusterBasicFeaturesSanityBaseClass):
"'disperse.optimistic-change-log' to 'off'")
# Log Volume Info and Status before simulating brick failure
- g.log.info("Logging volume info and Status before bringing bricks "
- "offlien from the volume %s", self.volname)
ret = log_volume_info_and_status(self.mnode, self.volname)
self.assertTrue(ret, ("Logging volume info and status failed on "
"volume %s", self.volname))
@@ -767,16 +677,11 @@ class TestGlusterHealSanity(GlusterBasicFeaturesSanityBaseClass):
bricks_to_bring_offline = bricks_to_bring_offline_dict['volume_bricks']
# Bring bricks offline
- g.log.info("Bringing bricks: %s offline", bricks_to_bring_offline)
ret = bring_bricks_offline(self.volname, bricks_to_bring_offline)
self.assertTrue(ret, ("Failed to bring bricks: %s offline",
bricks_to_bring_offline))
- g.log.info("Successful in bringing bricks: %s offline",
- bricks_to_bring_offline)
# Log Volume Info and Status
- g.log.info("Logging volume info and Status after bringing bricks "
- "offline from the volume %s", self.volname)
ret = log_volume_info_and_status(self.mnode, self.volname)
self.assertTrue(ret, ("Logging volume info and status failed on "
"volume %s", self.volname))
@@ -784,20 +689,15 @@ class TestGlusterHealSanity(GlusterBasicFeaturesSanityBaseClass):
self.volname)
# Validate if bricks are offline
- g.log.info("Validating if bricks: %s are offline",
- bricks_to_bring_offline)
ret = are_bricks_offline(self.mnode, self.volname,
bricks_to_bring_offline)
self.assertTrue(ret, ("Not all the bricks in list: %s are offline",
bricks_to_bring_offline))
- g.log.info("Successfully validated that bricks: %s are all offline",
- bricks_to_bring_offline)
# Add delay before bringing bricks online
time.sleep(40)
# Bring bricks online
- g.log.info("Bring bricks: %s online", bricks_to_bring_offline)
ret = bring_bricks_online(self.mnode, self.volname,
bricks_to_bring_offline)
self.assertTrue(ret, ("Failed to bring bricks: %s online",
@@ -806,17 +706,12 @@ class TestGlusterHealSanity(GlusterBasicFeaturesSanityBaseClass):
bricks_to_bring_offline)
# Wait for volume processes to be online
- g.log.info("Wait for volume processes to be online")
ret = wait_for_volume_process_to_be_online(self.mnode, self.volname,
timeout=400)
self.assertTrue(ret, ("Failed to wait for volume %s processes to "
"be online", self.volname))
- g.log.info("Successful in waiting for volume %s processes to be "
- "online", self.volname)
# Log Volume Info and Status
- g.log.info("Logging volume info and Status after bringing bricks "
- "online from the volume %s", self.volname)
ret = log_volume_info_and_status(self.mnode, self.volname)
self.assertTrue(ret, ("Logging volume info and status failed on "
"volume %s", self.volname))
@@ -824,7 +719,6 @@ class TestGlusterHealSanity(GlusterBasicFeaturesSanityBaseClass):
self.volname)
# Verify all volume processes are online
- g.log.info("Verifying volume's all process are online")
ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
self.assertTrue(ret, ("Volume %s : All process are not online",
self.volname))
@@ -845,7 +739,5 @@ class TestGlusterHealSanity(GlusterBasicFeaturesSanityBaseClass):
self.assertTrue(ret, "IO failed on some of the clients")
# List all files and dirs created
- g.log.info("List all files and directories:")
ret = list_all_files_and_dirs_mounts(self.mounts)
self.assertTrue(ret, "Failed to list all files and dirs")
- g.log.info("Listing all files and directories is successful")