Diffstat (limited to 'tests/functional/bvt')
-rw-r--r--  tests/functional/bvt/test_basic.py                      |  14
-rw-r--r--  tests/functional/bvt/test_cvt.py                        | 154
-rw-r--r--  tests/functional/bvt/test_sosreport_interoperability.py | 141
-rw-r--r--  tests/functional/bvt/test_verify_volume_sanity.py       |  11
-rw-r--r--  tests/functional/bvt/test_vvt.py                        |  27
5 files changed, 183 insertions(+), 164 deletions(-)
diff --git a/tests/functional/bvt/test_basic.py b/tests/functional/bvt/test_basic.py
index a031850cf..bf6c94958 100644
--- a/tests/functional/bvt/test_basic.py
+++ b/tests/functional/bvt/test_basic.py
@@ -44,7 +44,6 @@ class TestGlusterdSanity(GlusterBaseClass):
         peers are in connected state after glusterd restarts.
         """
         # restart glusterd on all servers
-        g.log.info("Restart glusterd on all servers %s", self.servers)
         ret = restart_glusterd(self.servers)
         self.assertTrue(ret, ("Failed to restart glusterd on all servers %s",
                               self.servers))
@@ -52,15 +51,12 @@ class TestGlusterdSanity(GlusterBaseClass):
                    self.servers)
 
         # Check if glusterd is running on all servers(expected: active)
-        g.log.info("Check if glusterd is running on all servers %s"
-                   "(expected: active)", self.servers)
         ret = is_glusterd_running(self.servers)
         self.assertEqual(ret, 0, ("Glusterd is not running on all servers %s",
                                   self.servers))
         g.log.info("Glusterd is running on all the servers %s", self.servers)
 
         # Stop glusterd on all servers
-        g.log.info("Stop glusterd on all servers %s", self.servers)
         ret = stop_glusterd(self.servers)
         self.assertTrue(ret, ("Failed to stop glusterd on all servers %s",
                               self.servers))
@@ -68,8 +64,6 @@ class TestGlusterdSanity(GlusterBaseClass):
                    self.servers)
 
         # Check if glusterd is running on all servers(expected: not running)
-        g.log.info("Check if glusterd is running on all servers %s"
-                   "(expected: not running)", self.servers)
         ret = is_glusterd_running(self.servers)
         self.assertNotEqual(ret, 0, ("Glusterd is still running on some "
                                      "servers %s", self.servers))
@@ -77,7 +71,6 @@ class TestGlusterdSanity(GlusterBaseClass):
                    self.servers)
 
         # Start glusterd on all servers
-        g.log.info("Start glusterd on all servers %s", self.servers)
         ret = start_glusterd(self.servers)
         self.assertTrue(ret, ("Failed to start glusterd on all servers %s",
                               self.servers))
@@ -85,8 +78,6 @@ class TestGlusterdSanity(GlusterBaseClass):
                    self.servers)
 
         # Check if glusterd is running on all servers(expected: active)
-        g.log.info("Check if glusterd is running on all servers %s"
-                   "(expected: active)", self.servers)
         ret = is_glusterd_running(self.servers)
         self.assertEqual(ret, 0, ("Glusterd is not running on all servers %s",
                                   self.servers))
@@ -96,10 +87,8 @@ class TestGlusterdSanity(GlusterBaseClass):
         time.sleep(30)
 
         # Validate all the peers are in connected state
-        g.log.info("Validating all the peers are in Cluster and Connected")
         ret = self.validate_peers_are_connected()
         self.assertTrue(ret, "Validating Peers to be in Cluster Failed")
-        g.log.info("All peers are in connected state")
 
         self.test_method_complete = True
 
@@ -108,7 +97,6 @@ class TestGlusterdSanity(GlusterBaseClass):
         """
         if not self.test_method_complete:
             # restart glusterd on all servers
-            g.log.info("Restart glusterd on all servers %s", self.servers)
             ret = restart_glusterd(self.servers)
             if not ret:
                 raise ExecutionError("Failed to restart glusterd on all "
@@ -120,12 +108,10 @@ class TestGlusterdSanity(GlusterBaseClass):
             time.sleep(30)
 
             # Validate all the peers are in connected state
-            g.log.info("Validating all the peers are in Cluster and Connected")
             ret = self.validate_peers_are_connected()
             if not ret:
                 raise ExecutionError("Validating Peers to be in Cluster "
                                      "Failed")
-            g.log.info("All peers are in connected state")
 
         # Calling GlusterBaseClass tearDown
         self.get_super_method(self, 'tearDown')()
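The hunks above strip redundant g.log.info calls from the glusterd sanity test without touching its control flow. For orientation, a hedged, condensed sketch of that flow follows (not part of the patch); it assumes the glustolibs.gluster.gluster_init helpers the test uses, where is_glusterd_running() returns 0 only when the daemon is active on every node:

    # Condensed sketch of the restart/stop/start cycle test_basic.py checks.
    # `servers` is a list of cluster nodes, as in the glusto-tests config.
    from glustolibs.gluster.gluster_init import (
        is_glusterd_running, restart_glusterd, start_glusterd, stop_glusterd)

    def glusterd_cycle_is_sane(servers):
        """Restart, stop, and start glusterd, checking state at each step."""
        if not restart_glusterd(servers):
            return False
        if is_glusterd_running(servers) != 0:  # 0 means active everywhere
            return False
        # After an explicit stop, the daemon must report as not running
        if not stop_glusterd(servers) or is_glusterd_running(servers) == 0:
            return False
        # Bring it back; the real test also sleeps before validating peers
        return start_glusterd(servers) and is_glusterd_running(servers) == 0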
diff --git a/tests/functional/bvt/test_cvt.py b/tests/functional/bvt/test_cvt.py
index 9a2bcd9f7..f8cb4f2ba 100644
--- a/tests/functional/bvt/test_cvt.py
+++ b/tests/functional/bvt/test_cvt.py
@@ -74,8 +74,6 @@ class GlusterBasicFeaturesSanityBaseClass(GlusterBaseClass):
         cls.get_super_method(cls, 'setUpClass')()
 
         # Upload io scripts for running IO on mounts
-        g.log.info("Upload io scripts to clients %s for running IO on "
-                   "mounts", cls.clients)
         cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
                                   "file_dir_ops.py")
         ret = upload_scripts(cls.clients, cls.script_upload_path)
@@ -86,6 +84,14 @@ class GlusterBasicFeaturesSanityBaseClass(GlusterBaseClass):
                    cls.clients)
 
         cls.counter = 1
+
+        # Temporary code:
+        # Additional checks to gather information from all
+        # servers for Bug 1810901 and setting log level to debug.
+        ret = set_volume_options(cls.mnode, 'all',
+                                 {'cluster.daemon-log-level': 'DEBUG'})
+        if not ret:
+            g.log.error('Failed to set cluster.daemon-log-level to DEBUG')
 
         # int: Value of counter is used for dirname-start-num argument for
         # file_dir_ops.py create_deep_dirs_with_files.
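The "temporary code" hunk above raises the daemon log level cluster-wide to gather data for Bug 1810901. A minimal sketch of that pattern, with a read-back check added purely for illustration; querying the 'all' scope via get_volume_options is an assumption here, not something the patch does:

    # Set a cluster-wide debug option and confirm it took effect.
    from glusto.core import Glusto as g
    from glustolibs.gluster.volume_ops import (get_volume_options,
                                               set_volume_options)

    def enable_daemon_debug_logs(mnode):
        # volname 'all' applies the option cluster-wide, as in the hunk
        if not set_volume_options(mnode, 'all',
                                  {'cluster.daemon-log-level': 'DEBUG'}):
            g.log.error('Failed to set cluster.daemon-log-level to DEBUG')
            return False
        opts = get_volume_options(mnode, 'all', 'cluster.daemon-log-level')
        return bool(opts) and opts.get('cluster.daemon-log-level') == 'DEBUG'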
@@ -109,11 +115,21 @@ class GlusterBasicFeaturesSanityBaseClass(GlusterBaseClass):
         self.get_super_method(self, 'setUp')()
 
         # Setup Volume and Mount Volume
-        g.log.info("Starting to Setup Volume and Mount Volume")
         ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
         if not ret:
             raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
-        g.log.info("Successful in Setup Volume and Mount Volume")
+
+        # Temporary code:
+        # Additional checks to gather information from all
+        # servers for Bug 1810901 and setting log level to debug.
+        for opt in ('diagnostics.brick-log-level',
+                    'diagnostics.client-log-level',
+                    'diagnostics.brick-sys-log-level',
+                    'diagnostics.client-sys-log-level'):
+            ret = set_volume_options(self.mnode, self.volname,
+                                     {opt: 'DEBUG'})
+            if not ret:
+                g.log.error('Failed to set volume option %s', opt)
 
         # Start IO on mounts
         g.log.info("Starting IO on all mounts...")
@@ -124,7 +140,7 @@ class GlusterBasicFeaturesSanityBaseClass(GlusterBaseClass):
             cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
                    "--dirname-start-num %d "
                    "--dir-depth 2 "
-                   "--dir-length 15 "
+                   "--dir-length 10 "
                    "--max-num-of-dirs 5 "
                    "--num-of-files 5 %s" % (
                        self.script_upload_path,
@@ -132,7 +148,7 @@ class GlusterBasicFeaturesSanityBaseClass(GlusterBaseClass):
             proc = g.run_async(mount_obj.client_system, cmd,
                                user=mount_obj.user)
             self.all_mounts_procs.append(proc)
-            self.counter = self.counter + 10
+            self.counter += 10
         self.io_validation_complete = False
 
         # Adding a delay of 15 seconds before test method starts. This
@@ -148,26 +164,19 @@ class GlusterBasicFeaturesSanityBaseClass(GlusterBaseClass):
         # Wait for IO to complete if io validation is not executed in the
         # test method
         if not self.io_validation_complete:
-            g.log.info("Wait for IO to complete as IO validation did not "
-                       "succeed in test method")
             ret = wait_for_io_to_complete(self.all_mounts_procs, self.mounts)
             if not ret:
                 raise ExecutionError("IO failed on some of the clients")
-            g.log.info("IO is successful on all mounts")
 
         # List all files and dirs created
-        g.log.info("List all files and directories:")
         ret = list_all_files_and_dirs_mounts(self.mounts)
         if not ret:
             raise ExecutionError("Failed to list all files and dirs")
-        g.log.info("Listing all files and directories is successful")
 
         # Unmount Volume and Cleanup Volume
-        g.log.info("Starting to Unmount Volume and Cleanup Volume")
         ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
         if not ret:
             raise ExecutionError("Failed to Unmount Volume and Cleanup Volume")
-        g.log.info("Successful in Unmount Volume and Cleanup Volume")
 
         # Calling GlusterBaseClass tearDown
         self.get_super_method(self, 'tearDown')()
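setUp() above now also raises the brick/client log levels, then starts IO asynchronously; tearDown() reaps it. An illustrative round trip of that async-IO pattern follows — the wrapper below is hypothetical, while the command string mirrors the hunk:

    # Launch file_dir_ops.py on every mount, then block until all IO exits.
    from glusto.core import Glusto as g
    from glustolibs.io.utils import wait_for_io_to_complete

    def start_and_reap_io(mounts, script_path):
        procs = []
        for counter, mount in enumerate(mounts, start=1):
            cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d --dir-depth 2 --dir-length 10 "
                   "--max-num-of-dirs 5 --num-of-files 5 %s"
                   % (script_path, counter * 10, mount.mountpoint))
            procs.append(g.run_async(mount.client_system, cmd,
                                     user=mount.user))
        # Returns False if any of the async IO processes failed
        return wait_for_io_to_complete(procs, mounts)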
@@ -190,7 +199,6 @@ class TestGlusterExpandVolumeSanity(GlusterBasicFeaturesSanityBaseClass):
         - validate IO
         """
         # Log Volume Info and Status before expanding the volume.
-        g.log.info("Logging volume info and Status before expanding volume")
         ret = log_volume_info_and_status(self.mnode, self.volname)
         self.assertTrue(ret, ("Logging volume info and status failed on "
                               "volume %s", self.volname))
@@ -198,24 +206,17 @@ class TestGlusterExpandVolumeSanity(GlusterBasicFeaturesSanityBaseClass):
                    self.volname)
 
         # Expanding volume by adding bricks to the volume when IO in progress
-        g.log.info("Start adding bricks to volume when IO in progress")
         ret = expand_volume(self.mnode, self.volname, self.servers,
                             self.all_servers_info)
         self.assertTrue(ret, ("Failed to expand the volume when IO in "
                               "progress on volume %s", self.volname))
-        g.log.info("Expanding volume when IO in progress is successful on "
-                   "volume %s", self.volname)
 
         # Wait for volume processes to be online
-        g.log.info("Wait for volume processes to be online")
         ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
         self.assertTrue(ret, ("Failed to wait for volume %s processes to "
                               "be online", self.volname))
-        g.log.info("Successful in waiting for volume %s processes to be "
-                   "online", self.volname)
 
         # Log Volume Info and Status after expanding the volume
-        g.log.info("Logging volume info and Status after expanding volume")
         ret = log_volume_info_and_status(self.mnode, self.volname)
         self.assertTrue(ret, ("Logging volume info and status failed on "
                               "volume %s", self.volname))
@@ -223,14 +224,11 @@ class TestGlusterExpandVolumeSanity(GlusterBasicFeaturesSanityBaseClass):
                    self.volname)
 
         # Verify volume's all process are online
-        g.log.info("Verifying volume's all process are online")
         ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
         self.assertTrue(ret, ("Volume %s : All process are not online",
                               self.volname))
-        g.log.info("Volume %s : All process are online", self.volname)
 
         # Start Rebalance
-        g.log.info("Starting Rebalance on the volume")
         ret, _, _ = rebalance_start(self.mnode, self.volname)
         self.assertEqual(ret, 0, ("Failed to start rebalance on the volume "
                                   "%s", self.volname))
@@ -242,15 +240,14 @@ class TestGlusterExpandVolumeSanity(GlusterBasicFeaturesSanityBaseClass):
         _, _, _ = rebalance_status(self.mnode, self.volname)
 
         # Wait for rebalance to complete
-        g.log.info("Waiting for rebalance to complete")
-        ret = wait_for_rebalance_to_complete(self.mnode, self.volname)
+        ret = wait_for_rebalance_to_complete(self.mnode, self.volname,
+                                             timeout=1800)
         self.assertTrue(ret, ("Rebalance is not yet complete on the volume "
                               "%s", self.volname))
         g.log.info("Rebalance is successfully complete on the volume %s",
                    self.volname)
 
         # Check Rebalance status after rebalance is complete
-        g.log.info("Checking Rebalance status")
         ret, _, _ = rebalance_status(self.mnode, self.volname)
         self.assertEqual(ret, 0, ("Failed to get rebalance status for the "
                                   "volume %s", self.volname))
@@ -263,10 +260,8 @@ class TestGlusterExpandVolumeSanity(GlusterBasicFeaturesSanityBaseClass):
         self.assertTrue(ret, "IO failed on some of the clients")
 
         # List all files and dirs created
-        g.log.info("List all files and directories:")
         ret = list_all_files_and_dirs_mounts(self.mounts)
         self.assertTrue(ret, "Failed to list all files and dirs")
-        g.log.info("Listing all files and directories is successful")
 
 
 @runs_on([['distributed', 'distributed-replicated', 'distributed-dispersed'],
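The substantive change in this test is wait_for_rebalance_to_complete gaining timeout=1800, since the library's default polling window could expire before rebalance finishes on slower setups. A hedged sketch of the expand-then-rebalance sequence with the longer ceiling (the wrapper is illustrative, not part of the patch):

    # Expand the volume, start rebalance, and poll until it completes.
    from glustolibs.gluster.rebalance_ops import (
        rebalance_start, wait_for_rebalance_to_complete)
    from glustolibs.gluster.volume_libs import expand_volume

    def expand_and_rebalance(mnode, volname, servers, all_servers_info):
        if not expand_volume(mnode, volname, servers, all_servers_info):
            return False
        ret, _, _ = rebalance_start(mnode, volname)
        if ret != 0:
            return False
        # Poll rebalance status for up to 30 minutes, matching the hunk
        return wait_for_rebalance_to_complete(mnode, volname, timeout=1800)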
@@ -283,7 +278,6 @@ class TestGlusterShrinkVolumeSanity(GlusterBasicFeaturesSanityBaseClass):
         - validate IO
         """
         # Log Volume Info and Status before shrinking the volume.
-        g.log.info("Logging volume info and Status before shrinking volume")
         ret = log_volume_info_and_status(self.mnode, self.volname)
         self.assertTrue(ret, ("Logging volume info and status failed on "
                               "volume %s", self.volname))
@@ -291,23 +285,19 @@ class TestGlusterShrinkVolumeSanity(GlusterBasicFeaturesSanityBaseClass):
                    self.volname)
 
         # Shrinking volume by removing bricks from volume when IO in progress
-        g.log.info("Start removing bricks from volume when IO in progress")
         ret = shrink_volume(self.mnode, self.volname)
+
         self.assertTrue(ret, ("Failed to shrink the volume when IO in "
                               "progress on volume %s", self.volname))
         g.log.info("Shrinking volume when IO in progress is successful on "
                    "volume %s", self.volname)
 
         # Wait for volume processes to be online
-        g.log.info("Wait for volume processes to be online")
         ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
         self.assertTrue(ret, ("Failed to wait for volume %s processes to "
                               "be online", self.volname))
-        g.log.info("Successful in waiting for volume %s processes to be "
-                   "online", self.volname)
 
         # Log Volume Info and Status after shrinking the volume
-        g.log.info("Logging volume info and Status after shrinking volume")
         ret = log_volume_info_and_status(self.mnode, self.volname)
         self.assertTrue(ret, ("Logging volume info and status failed on "
                               "volume %s", self.volname))
@@ -315,13 +305,9 @@ class TestGlusterShrinkVolumeSanity(GlusterBasicFeaturesSanityBaseClass):
                    self.volname)
 
         # Verify volume's all process are online
-        g.log.info("Verifying volume's all process are online after "
-                   "shrinking volume")
         ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
         self.assertTrue(ret, ("Volume %s : All process are not online",
                               self.volname))
-        g.log.info("Volume %s : All process are online after shrinking volume",
-                   self.volname)
 
         # Validate IO
         ret = validate_io_procs(self.all_mounts_procs, self.mounts)
@@ -329,10 +315,8 @@ class TestGlusterShrinkVolumeSanity(GlusterBasicFeaturesSanityBaseClass):
         self.assertTrue(ret, "IO failed on some of the clients")
 
         # List all files and dirs created
-        g.log.info("List all files and directories:")
         ret = list_all_files_and_dirs_mounts(self.mounts)
         self.assertTrue(ret, "Failed to list all files and dirs")
-        g.log.info("Listing all files and directories is successful")
 
 
 @runs_on([['replicated', 'distributed', 'distributed-replicated',
@@ -355,14 +339,11 @@ class TestGlusterVolumeSetSanity(GlusterBasicFeaturesSanityBaseClass):
         volume_options_list = ["features.uss", "features.shard"]
 
         # enable and validate the volume options
-        g.log.info("Setting the volume options: %s", volume_options_list)
         ret = enable_and_validate_volume_options(self.mnode, self.volname,
                                                  volume_options_list,
                                                  time_delay=30)
         self.assertTrue(ret, ("Unable to enable the volume options: %s",
                               volume_options_list))
-        g.log.info("Successfully enabled all the volume options: %s",
-                   volume_options_list)
 
         # Validate IO
         ret = validate_io_procs(self.all_mounts_procs, self.mounts)
@@ -370,10 +351,8 @@ class TestGlusterVolumeSetSanity(GlusterBasicFeaturesSanityBaseClass):
         self.assertTrue(ret, "IO failed on some of the clients")
 
         # List all files and dirs created
-        g.log.info("List all files and directories:")
         ret = list_all_files_and_dirs_mounts(self.mounts)
         self.assertTrue(ret, "Failed to list all files and dirs")
-        g.log.info("Listing all files and directories is successful")
 
 
 @runs_on([['replicated', 'distributed', 'distributed-replicated',
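The volume-set test leans entirely on enable_and_validate_volume_options, which sets each option and, after a delay, confirms it reads back as enabled. A hedged one-function sketch — the wrapper name is made up for illustration:

    # Toggle the two options this test exercises and validate them.
    from glustolibs.gluster.volume_libs import (
        enable_and_validate_volume_options)

    def enable_uss_and_shard(mnode, volname):
        # time_delay gives glusterd a window to propagate each option
        # before it is read back and checked as enabled
        return enable_and_validate_volume_options(
            mnode, volname, ["features.uss", "features.shard"],
            time_delay=30)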
@@ -388,14 +367,12 @@ class TestQuotaSanity(GlusterBasicFeaturesSanityBaseClass):
         in progress.
         """
         # Enable Quota
-        g.log.info("Enabling quota on the volume %s", self.volname)
         ret, _, _ = quota_enable(self.mnode, self.volname)
         self.assertEqual(ret, 0, ("Failed to enable quota on the volume %s",
                                   self.volname))
         g.log.info("Successfully enabled quota on the volume %s", self.volname)
 
         # Check if quota is enabled
-        g.log.info("Validate Quota is enabled on the volume %s", self.volname)
         ret = is_quota_enabled(self.mnode, self.volname)
         self.assertTrue(ret, ("Quota is not enabled on the volume %s",
                               self.volname))
@@ -406,8 +383,6 @@ class TestQuotaSanity(GlusterBasicFeaturesSanityBaseClass):
         path = "/"
 
         # Set Quota limit on the root of the volume
-        g.log.info("Set Quota Limit on the path %s of the volume %s",
-                   path, self.volname)
         ret, _, _ = quota_limit_usage(self.mnode, self.volname,
                                       path=path, limit="1GB")
         self.assertEqual(ret, 0, ("Failed to set quota limit on path %s of "
@@ -416,8 +391,6 @@ class TestQuotaSanity(GlusterBasicFeaturesSanityBaseClass):
                    path, self.volname)
 
         # quota_fetch_list
-        g.log.info("Get Quota list for path %s of the volume %s",
-                   path, self.volname)
         quota_list = quota_fetch_list(self.mnode, self.volname, path=path)
         self.assertIsNotNone(quota_list, ("Failed to get the quota list for "
                                           "path %s of the volume %s",
@@ -430,7 +403,6 @@ class TestQuotaSanity(GlusterBasicFeaturesSanityBaseClass):
                    "volume %s", path, quota_list, self.volname)
 
         # Disable quota
-        g.log.info("Disable quota on the volume %s", self.volname)
         ret, _, _ = quota_disable(self.mnode, self.volname)
         self.assertEqual(ret, 0, ("Failed to disable quota on the volume %s",
                                   self.volname))
@@ -438,7 +410,6 @@ class TestQuotaSanity(GlusterBasicFeaturesSanityBaseClass):
                    self.volname)
 
         # Check if quota is still enabled (expected : Disabled)
-        g.log.info("Validate Quota is enabled on the volume %s", self.volname)
         ret = is_quota_enabled(self.mnode, self.volname)
         self.assertFalse(ret, ("Quota is still enabled on the volume %s "
                                "(expected: Disable) ", self.volname))
@@ -446,14 +417,12 @@ class TestQuotaSanity(GlusterBasicFeaturesSanityBaseClass):
                    self.volname)
 
         # Enable Quota
-        g.log.info("Enabling quota on the volume %s", self.volname)
         ret, _, _ = quota_enable(self.mnode, self.volname)
         self.assertEqual(ret, 0, ("Failed to enable quota on the volume %s",
                                   self.volname))
         g.log.info("Successfully enabled quota on the volume %s", self.volname)
 
         # Check if quota is enabled
-        g.log.info("Validate Quota is enabled on the volume %s", self.volname)
         ret = is_quota_enabled(self.mnode, self.volname)
         self.assertTrue(ret, ("Quota is not enabled on the volume %s",
                               self.volname))
@@ -461,8 +430,6 @@ class TestQuotaSanity(GlusterBasicFeaturesSanityBaseClass):
                    self.volname)
 
         # quota_fetch_list
-        g.log.info("Get Quota list for path %s of the volume %s",
-                   path, self.volname)
         quota_list = quota_fetch_list(self.mnode, self.volname, path=path)
         self.assertIsNotNone(quota_list, ("Failed to get the quota list for "
                                           "path %s of the volume %s",
@@ -480,10 +447,8 @@ class TestQuotaSanity(GlusterBasicFeaturesSanityBaseClass):
         self.assertTrue(ret, "IO failed on some of the clients")
 
         # List all files and dirs created
-        g.log.info("List all files and directories:")
         ret = list_all_files_and_dirs_mounts(self.mounts)
         self.assertTrue(ret, "Failed to list all files and dirs")
-        g.log.info("Listing all files and directories is successful")
 
 
 @runs_on([['replicated', 'distributed', 'distributed-replicated',
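For reference, the quota test's enable/limit/list round trip condenses to roughly the following; the (ret, out, err) tuples and the None return on a failed list are taken from the calls in the hunks above, and the wrapper itself is illustrative:

    # Enable quota, cap the volume root, and fetch the limit list.
    from glustolibs.gluster.quota_ops import (is_quota_enabled, quota_enable,
                                              quota_fetch_list,
                                              quota_limit_usage)

    def set_root_quota(mnode, volname, limit="1GB"):
        ret, _, _ = quota_enable(mnode, volname)
        if ret != 0 or not is_quota_enabled(mnode, volname):
            return False
        ret, _, _ = quota_limit_usage(mnode, volname, path="/", limit=limit)
        if ret != 0:
            return False
        # quota_fetch_list returns None on failure, a listing otherwise
        return quota_fetch_list(mnode, volname, path="/") is not None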
@@ -500,8 +465,6 @@ class TestSnapshotSanity(GlusterBasicFeaturesSanityBaseClass):
         """
         snap_name = "snap_cvt"
         # Create Snapshot
-        g.log.info("Creating snapshot %s of the volume %s",
-                   snap_name, self.volname)
         ret, _, _ = snap_create(self.mnode, self.volname, snap_name)
         self.assertEqual(ret, 0, ("Failed to create snapshot with name %s "
                                   " of the volume %s", snap_name,
@@ -510,8 +473,6 @@ class TestSnapshotSanity(GlusterBasicFeaturesSanityBaseClass):
                    snap_name, self.volname)
 
         # List Snapshot
-        g.log.info("Listing the snapshot created for the volume %s",
-                   self.volname)
         snap_list = get_snap_list(self.mnode)
         self.assertIsNotNone(snap_list, "Unable to get the Snapshot list")
         self.assertIn(snap_name, snap_list,
@@ -520,8 +481,6 @@ class TestSnapshotSanity(GlusterBasicFeaturesSanityBaseClass):
                    snap_name)
 
         # Activate the snapshot
-        g.log.info("Activating snapshot %s of the volume %s",
-                   snap_name, self.volname)
         ret, _, _ = snap_activate(self.mnode, snap_name)
         self.assertEqual(ret, 0, ("Failed to activate snapshot with name %s "
                                   " of the volume %s", snap_name,
@@ -533,8 +492,6 @@ class TestSnapshotSanity(GlusterBasicFeaturesSanityBaseClass):
         uss_options = ["features.uss"]
         if self.mount_type == "cifs":
             uss_options.append("features.show-snapshot-directory")
-        g.log.info("Enable uss options %s on the volume %s", uss_options,
-                   self.volname)
         ret = enable_and_validate_volume_options(self.mnode, self.volname,
                                                  uss_options,
                                                  time_delay=30)
@@ -544,14 +501,11 @@ class TestSnapshotSanity(GlusterBasicFeaturesSanityBaseClass):
                    uss_options, self.volname)
 
         # Viewing snapshot from mount
-        g.log.info("Viewing Snapshot %s from mounts:", snap_name)
         ret = view_snaps_from_mount(self.mounts, snap_name)
         self.assertTrue(ret, ("Failed to View snap %s from mounts", snap_name))
         g.log.info("Successfully viewed snap %s from mounts", snap_name)
 
         # De-Activate the snapshot
-        g.log.info("Deactivating snapshot %s of the volume %s",
-                   snap_name, self.volname)
         ret, _, _ = snap_deactivate(self.mnode, snap_name)
         self.assertEqual(ret, 0, ("Failed to deactivate snapshot with name %s "
                                   " of the volume %s", snap_name,
@@ -561,8 +515,6 @@ class TestSnapshotSanity(GlusterBasicFeaturesSanityBaseClass):
 
         # Viewing snapshot from mount (.snaps shouldn't be listed from mount)
         for mount_obj in self.mounts:
-            g.log.info("Viewing Snapshot %s from mount %s:%s", snap_name,
-                       mount_obj.client_system, mount_obj.mountpoint)
             ret = view_snaps_from_mount(mount_obj, snap_name)
             self.assertFalse(ret, ("Still able to View snap %s from mount "
                                    "%s:%s", snap_name,
@@ -571,8 +523,6 @@ class TestSnapshotSanity(GlusterBasicFeaturesSanityBaseClass):
             g.log.info("%s not listed under .snaps from mount %s:%s",
                        snap_name, mount_obj.client_system,
                        mount_obj.mountpoint)
-        g.log.info("%s not listed under .snaps from mounts after "
-                   "deactivating ", snap_name)
 
         # Validate IO
         ret = validate_io_procs(self.all_mounts_procs, self.mounts)
@@ -580,10 +530,8 @@ class TestSnapshotSanity(GlusterBasicFeaturesSanityBaseClass):
         self.assertTrue(ret, "IO failed on some of the clients")
 
         # List all files and dirs created
-        g.log.info("List all files and directories:")
         ret = list_all_files_and_dirs_mounts(self.mounts)
         self.assertTrue(ret, "Failed to list all files and dirs")
-        g.log.info("Listing all files and directories is successful")
 
 
 @runs_on([['replicated', 'distributed-replicated'],
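The snapshot test's core lifecycle — create, list, activate, deactivate — condenses to the sketch below, using the snap_ops helpers named in the hunks; the USS options and the mount-side .snaps checks are omitted here, and the wrapper is illustrative only:

    # Create a snapshot, confirm it is listed, then activate/deactivate it.
    from glustolibs.gluster.snap_ops import (get_snap_list, snap_activate,
                                             snap_create, snap_deactivate)

    def snapshot_round_trip(mnode, volname, snap_name="snap_cvt"):
        ret, _, _ = snap_create(mnode, volname, snap_name)
        if ret != 0 or snap_name not in (get_snap_list(mnode) or []):
            return False
        ret, _, _ = snap_activate(mnode, snap_name)
        if ret != 0:
            return False
        # Deactivation removes the snapshot from the .snaps view on mounts
        ret, _, _ = snap_deactivate(mnode, snap_name)
        return ret == 0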
@@ -601,8 +549,6 @@ class TestGlusterReplaceBrickSanity(GlusterBasicFeaturesSanityBaseClass):
         - validate IO
         """
         # Log Volume Info and Status before replacing brick from the volume.
-        g.log.info("Logging volume info and Status before replacing brick "
-                   "from the volume %s", self.volname)
         ret = log_volume_info_and_status(self.mnode, self.volname)
         self.assertTrue(ret, ("Logging volume info and status failed on "
                               "volume %s", self.volname))
@@ -610,23 +556,17 @@ class TestGlusterReplaceBrickSanity(GlusterBasicFeaturesSanityBaseClass):
                    self.volname)
 
         # Replace brick from a sub-volume
-        g.log.info("Replace a faulty brick from the volume")
         ret = replace_brick_from_volume(self.mnode, self.volname,
                                         self.servers, self.all_servers_info)
         self.assertTrue(ret, "Failed to replace faulty brick from the volume")
         g.log.info("Successfully replaced faulty brick from the volume")
 
         # Wait for volume processes to be online
-        g.log.info("Wait for volume processes to be online")
         ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
         self.assertTrue(ret, ("Failed to wait for volume %s processes to "
                               "be online", self.volname))
-        g.log.info("Successful in waiting for volume %s processes to be "
-                   "online", self.volname)
 
         # Log Volume Info and Status after replacing the brick
-        g.log.info("Logging volume info and Status after replacing brick "
-                   "from the volume %s", self.volname)
         ret = log_volume_info_and_status(self.mnode, self.volname)
         self.assertTrue(ret, ("Logging volume info and status failed on "
                               "volume %s", self.volname))
@@ -634,19 +574,16 @@ class TestGlusterReplaceBrickSanity(GlusterBasicFeaturesSanityBaseClass):
                    self.volname)
 
         # Verify volume's all process are online
-        g.log.info("Verifying volume's all process are online")
         ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
         self.assertTrue(ret, ("Volume %s : All process are not online",
                               self.volname))
-        g.log.info("Volume %s : All process are online", self.volname)
 
         # Wait for self-heal to complete
-        g.log.info("Wait for self-heal to complete")
-        ret = monitor_heal_completion(self.mnode, self.volname)
+        ret = monitor_heal_completion(self.mnode, self.volname,
+                                      timeout_period=1800)
         self.assertTrue(ret, "Self heal didn't complete even after waiting "
-                        "for 20 minutes. 20 minutes is too much a time for "
+                        "for 30 minutes. 30 minutes is too much a time for "
                         "current test workload")
-        g.log.info("self-heal is successful after replace-brick operation")
 
         # Validate IO
         ret = validate_io_procs(self.all_mounts_procs, self.mounts)
@@ -654,10 +591,8 @@ class TestGlusterReplaceBrickSanity(GlusterBasicFeaturesSanityBaseClass):
         self.assertTrue(ret, "IO failed on some of the clients")
 
         # List all files and dirs created
-        g.log.info("List all files and directories:")
         ret = list_all_files_and_dirs_mounts(self.mounts)
         self.assertTrue(ret, "Failed to list all files and dirs")
-        g.log.info("Listing all files and directories is successful")
 
 
 # This test is disabled on nfs because of bug 1473668. A patch to apply the
@@ -698,8 +633,6 @@ class TestGlusterHealSanity(GlusterBasicFeaturesSanityBaseClass):
                           "'disperse.optimistic-change-log' to 'off'")
 
         # Log Volume Info and Status before simulating brick failure
-        g.log.info("Logging volume info and Status before bringing bricks "
-                   "offlien from the volume %s", self.volname)
         ret = log_volume_info_and_status(self.mnode, self.volname)
         self.assertTrue(ret, ("Logging volume info and status failed on "
                               "volume %s", self.volname))
@@ -709,22 +642,14 @@ class TestGlusterHealSanity(GlusterBasicFeaturesSanityBaseClass):
         # Select bricks to bring offline
         bricks_to_bring_offline_dict = (select_bricks_to_bring_offline(
             self.mnode, self.volname))
-        bricks_to_bring_offline = list(filter(None, (
-            bricks_to_bring_offline_dict['hot_tier_bricks'] +
-            bricks_to_bring_offline_dict['cold_tier_bricks'] +
-            bricks_to_bring_offline_dict['volume_bricks'])))
+        bricks_to_bring_offline = bricks_to_bring_offline_dict['volume_bricks']
 
         # Bring bricks offline
-        g.log.info("Bringing bricks: %s offline", bricks_to_bring_offline)
         ret = bring_bricks_offline(self.volname, bricks_to_bring_offline)
         self.assertTrue(ret, ("Failed to bring bricks: %s offline",
                               bricks_to_bring_offline))
-        g.log.info("Successful in bringing bricks: %s offline",
-                   bricks_to_bring_offline)
 
         # Log Volume Info and Status
-        g.log.info("Logging volume info and Status after bringing bricks "
-                   "offline from the volume %s", self.volname)
         ret = log_volume_info_and_status(self.mnode, self.volname)
         self.assertTrue(ret, ("Logging volume info and status failed on "
                               "volume %s", self.volname))
@@ -732,20 +657,15 @@ class TestGlusterHealSanity(GlusterBasicFeaturesSanityBaseClass):
                    self.volname)
 
         # Validate if bricks are offline
-        g.log.info("Validating if bricks: %s are offline",
-                   bricks_to_bring_offline)
         ret = are_bricks_offline(self.mnode, self.volname,
                                  bricks_to_bring_offline)
         self.assertTrue(ret, ("Not all the bricks in list: %s are offline",
                               bricks_to_bring_offline))
-        g.log.info("Successfully validated that bricks: %s are all offline",
-                   bricks_to_bring_offline)
 
         # Add delay before bringing bricks online
         time.sleep(40)
 
         # Bring bricks online
-        g.log.info("Bring bricks: %s online", bricks_to_bring_offline)
         ret = bring_bricks_online(self.mnode, self.volname,
                                   bricks_to_bring_offline)
         self.assertTrue(ret, ("Failed to bring bricks: %s online",
@@ -754,16 +674,12 @@ class TestGlusterHealSanity(GlusterBasicFeaturesSanityBaseClass):
                    bricks_to_bring_offline)
 
         # Wait for volume processes to be online
-        g.log.info("Wait for volume processes to be online")
-        ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
+        ret = wait_for_volume_process_to_be_online(self.mnode, self.volname,
+                                                   timeout=400)
         self.assertTrue(ret, ("Failed to wait for volume %s processes to "
                               "be online", self.volname))
-        g.log.info("Successful in waiting for volume %s processes to be "
-                   "online", self.volname)
 
         # Log Volume Info and Status
-        g.log.info("Logging volume info and Status after bringing bricks "
-                   "online from the volume %s", self.volname)
         ret = log_volume_info_and_status(self.mnode, self.volname)
         self.assertTrue(ret, ("Logging volume info and status failed on "
                               "volume %s", self.volname))
@@ -771,7 +687,6 @@ class TestGlusterHealSanity(GlusterBasicFeaturesSanityBaseClass):
                    self.volname)
 
         # Verify volume's all process are online
-        g.log.info("Verifying volume's all process are online")
         ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
         self.assertTrue(ret, ("Volume %s : All process are not online",
                               self.volname))
@@ -779,9 +694,10 @@ class TestGlusterHealSanity(GlusterBasicFeaturesSanityBaseClass):
 
         # Wait for self-heal to complete
         g.log.info("Wait for self-heal to complete")
-        ret = monitor_heal_completion(self.mnode, self.volname)
+        ret = monitor_heal_completion(self.mnode, self.volname,
+                                      timeout_period=1800)
         self.assertTrue(ret, "Self heal didn't complete even after waiting "
-                        "for 20 minutes. 20 minutes is too much a time for "
+                        "for 30 minutes. 30 minutes is too much a time for "
                         "current test workload")
         g.log.info("self-heal is successful after replace-brick operation")
 
@@ -791,7 +707,5 @@ class TestGlusterHealSanity(GlusterBasicFeaturesSanityBaseClass):
         self.assertTrue(ret, "IO failed on some of the clients")
 
         # List all files and dirs created
-        g.log.info("List all files and directories:")
         ret = list_all_files_and_dirs_mounts(self.mounts)
         self.assertTrue(ret, "Failed to list all files and dirs")
-        g.log.info("Listing all files and directories is successful")
diff --git a/tests/functional/bvt/test_sosreport_interoperability.py b/tests/functional/bvt/test_sosreport_interoperability.py
new file mode 100644
index 000000000..3f1081a57
--- /dev/null
+++ b/tests/functional/bvt/test_sosreport_interoperability.py
@@ -0,0 +1,141 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+# pylint: disable=too-many-statements, too-many-locals
+
+from unittest import SkipTest
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.glusterdir import get_dir_contents
+
+
+@runs_on([['arbiter', 'distributed-replicated', 'distributed-dispersed'],
+          ['glusterfs', 'cifs']])
+class ValidateSosreportBehavior(GlusterBaseClass):
+    """
+    This testcase validates sosreport behavior with glusterfs
+    """
+    def setUp(self):
+        # Calling GlusterBaseClass setUp
+        self.get_super_method(self, 'setUp')()
+
+        self.all_mounts_procs = []
+        self.io_validation_complete = False
+
+        # Setup Volume and Mount Volume
+        ret = self.setup_volume_and_mount_volume(mounts=[self.mounts[0]],
+                                                 volume_create_force=False)
+        if not ret:
+            raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
+
+    def tearDown(self):
+        """tearDown"""
+        ret = self.unmount_volume_and_cleanup_volume(mounts=[self.mounts[0]])
+        if not ret:
+            raise ExecutionError("Failed to umount the vol & cleanup Volume")
+        self.get_super_method(self, 'tearDown')()
+
+    def test_sosreport_behavior_for_glusterfs(self):
+        '''
+        Test Steps:
+        1) Download sos package if not installed
+        2) Fetch Sos version for reference
+        3) Note down all files in below locations before taking sosreport:
+            a) /var/run/gluster
+            b) /run/gluster
+            c) /var/lib/glusterd
+            d) /var/log/glusterfs
+        4) Take the sosreport
+        5) Again note down the list of all gluster files in the locations
+           mentioned in step#3. The list of files in this step should match
+           step#3
+        6) untar the sosreport to see if gluster files are packaged
+        '''
+
+        # Fetching sosreport version for information
+        ret, version, _ = g.run(self.servers[1], 'rpm -qa|grep sos')
+        if version[4:9] in ('3.8-6', '3.8-7', '3.8-8'):
+            raise SkipTest("Skipping testcase as bug is fixed in "
+                           "sosreport version 3.8.9")
+        g.log.info("sos version is %s", version)
+
+        # Noting down list of entries in gluster directories before sos
+        gluster_contents_before_sos = []
+        gluster_dirs = ('/var/run/gluster*', '/run/gluster*',
+                        '/var/lib/glusterd', '/var/log/glusterfs')
+        for gdir in gluster_dirs:
+            ret = get_dir_contents(self.servers[1], gdir, recursive=True)
+            gluster_contents_before_sos.append(ret)
+
+        # Check for any existing sosreport
+        var_tmp_dircontents_before_sos = get_dir_contents(self.servers[1],
+                                                          '/var/tmp/')
+
+        # Collect sosreport
+        ret, _, err = g.run(self.servers[1],
+                            'sosreport --batch --name=$HOSTNAME')
+        self.assertEqual(ret, 0, "failed to fetch sosreport due to {}"
+                         .format(err))
+
+        # Checking /var/tmp contents
+        var_tmp_dircontents_after_sos = get_dir_contents(self.servers[1],
+                                                         '/var/tmp/')
+
+        # Recheck if all gluster files still exist
+        gluster_contents_after_sos = []
+        for gdir in gluster_dirs:
+            ret = get_dir_contents(self.servers[1], gdir, recursive=True)
+            gluster_contents_after_sos.append(ret)
+
+        # Compare gluster files before and after taking sosreport
+        # There should be no difference in contents
+        # Ignoring /var/log/glusterfs, i.e. the last element of the list, to
+        # avoid false negatives as sosreport triggers heal which creates new
+        # logs and an obvious difference in the list of entries post sos
+        self.assertTrue((gluster_contents_before_sos[:-1] ==
+                         gluster_contents_after_sos[:-1]),
                        "Gluster files not matching before and after "
+                        "sosreport generation {} and {}"
+                        .format(gluster_contents_before_sos,
+                                gluster_contents_after_sos))
+
+        # Untar sosreport to check if gluster files are captured
+        sosfile = list(set(var_tmp_dircontents_after_sos) -
+                       set(var_tmp_dircontents_before_sos))
+        sosfile.sort()
+        untar_sosfile_cmd = 'tar -xvf /var/tmp/' + sosfile[0] + ' -C /var/tmp/'
+        ret, _, err = g.run(self.servers[1], untar_sosfile_cmd)
+        self.assertEqual(ret, 0, "Untar failed due to {}".format(err))
+        dirchecks = ('/var/lib/glusterd', '/var/log/glusterfs')
+        olddirs = [gluster_contents_after_sos[2],
+                   gluster_contents_after_sos[3]]
+        ret = {}
+        for after, before in zip(dirchecks, olddirs):
+            untar_dirpath = '/var/tmp/' + sosfile[0][0:-7]
+            untardir = untar_dirpath + after
+            _ = get_dir_contents(self.servers[1], untardir, recursive=True)
+            ret[after] = list(x.split(untar_dirpath, 1)[-1] for x in _)
+            if before == gluster_contents_after_sos[2]:
+                self.assertTrue(bool(before == ret[after]), 'gluster '
+                                'entries in sosreport may be missing as '
+                                "they don't match the actual contents")
+            else:
+                # Need this logic for var/log/glusterfs entries as rotated
+                # (.gz) logs are not collected by sos
+                self.assertTrue(all(entry in before for entry in ret[after]),
+                                'var-log-glusterfs entries in sosreport may '
+                                "be missing as they don't match the actual "
+                                'contents')
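The heart of this new test is a before/after listing comparison around the sosreport run. A hedged distillation of that idea — the helper name and structure below are illustrative, while the get_dir_contents usage follows the test:

    # Listings of the gluster state directories must survive a command run.
    from glusto.core import Glusto as g
    from glustolibs.gluster.glusterdir import get_dir_contents

    GLUSTER_DIRS = ('/var/run/gluster*', '/run/gluster*',
                    '/var/lib/glusterd', '/var/log/glusterfs')

    def gluster_files_survive(server, command='sosreport --batch'):
        before = [get_dir_contents(server, d, recursive=True)
                  for d in GLUSTER_DIRS]
        ret, _, err = g.run(server, command)
        if ret != 0:
            g.log.error('%s failed: %s', command, err)
            return False
        after = [get_dir_contents(server, d, recursive=True)
                 for d in GLUSTER_DIRS]
        # /var/log/glusterfs (last entry) is excluded: sosreport can spawn
        # heals and new log lines, so only the first three must match
        return before[:-1] == after[:-1]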
diff --git a/tests/functional/bvt/test_verify_volume_sanity.py b/tests/functional/bvt/test_verify_volume_sanity.py
index 6f92a111b..2013d0b1d 100644
--- a/tests/functional/bvt/test_verify_volume_sanity.py
+++ b/tests/functional/bvt/test_verify_volume_sanity.py
@@ -32,11 +32,13 @@ from glustolibs.gluster.gluster_base_class import (GlusterBaseClass, runs_on)
 class VerifyVolumeSanity(GlusterBaseClass):
 
     def setUp(self):
+
+        # Calling GlusterBaseClass setUp
+        self.get_super_method(self, 'setUp')()
+
         # Setup Volume and Mount Volume
-        g.log.info("Starting to Setup Volume and Mount Volume")
         ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
         self.assertTrue(ret, ("Failed to Setup_Volume and Mount_Volume"))
-        g.log.info("Successful in Setup Volume and Mount Volume")
 
     def test_volume_sanity(self):
         """
@@ -78,7 +80,8 @@ class VerifyVolumeSanity(GlusterBaseClass):
 
     def tearDown(self):
         # Stopping the volume
-        g.log.info("Starting to Unmount Volume and Cleanup Volume")
         ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
         self.assertTrue(ret, ("Failed to Unmount Volume and Cleanup Volume"))
-        g.log.info("Successful in Unmount Volume and Cleanup Volume")
+
+        # Calling GlusterBaseClass tearDown
+        self.get_super_method(self, 'tearDown')()
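Both remaining files get the same structural fix: chain to GlusterBaseClass at the top of setUp() and at the bottom of tearDown(), so framework state is initialised before any volume work and torn down after it. The pattern in isolation (the class name is illustrative only):

    from glustolibs.gluster.exceptions import ExecutionError
    from glustolibs.gluster.gluster_base_class import GlusterBaseClass

    class ExampleTest(GlusterBaseClass):
        def setUp(self):
            self.get_super_method(self, 'setUp')()   # framework state first
            if not self.setup_volume_and_mount_volume(mounts=self.mounts):
                raise ExecutionError("Failed to Setup_Volume and "
                                     "Mount_Volume")

        def tearDown(self):
            if not self.unmount_volume_and_cleanup_volume(
                    mounts=self.mounts):
                raise ExecutionError("Failed to Unmount and Cleanup Volume")
            self.get_super_method(self, 'tearDown')()  # framework last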
diff --git a/tests/functional/bvt/test_vvt.py b/tests/functional/bvt/test_vvt.py
index 4ad7bfc6a..13ffd9dde 100644
--- a/tests/functional/bvt/test_vvt.py
+++ b/tests/functional/bvt/test_vvt.py
@@ -49,8 +49,6 @@ class VolumeAccessibilityTests(GlusterBaseClass):
         cls.get_super_method(cls, 'setUpClass')()
 
         # Upload io scripts for running IO on mounts
-        g.log.info("Upload io scripts to clients %s for running IO on "
-                   "mounts", cls.clients)
         cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
                                   "file_dir_ops.py")
         ret = upload_scripts(cls.clients, cls.script_upload_path)
@@ -67,21 +65,17 @@ class VolumeAccessibilityTests(GlusterBaseClass):
         self.get_super_method(self, 'setUp')()
 
         # Setup_Volume
-        g.log.info("Starting to Setup Volume %s", self.volname)
         ret = self.setup_volume()
         if not ret:
             raise ExecutionError("Failed to Setup Volume %s" % self.volname)
-        g.log.info("Successful in Setup Volume %s", self.volname)
 
     def tearDown(self):
         """Cleanup the volume
         """
         # Cleanup Volume
-        g.log.info("Starting to Setup Volume %s", self.volname)
         ret = self.cleanup_volume()
         if not ret:
             raise ExecutionError("Failed to Setup_Volume %s" % self.volname)
-        g.log.info("Successful in Setup Volume %s", self.volname)
 
         # Calling GlusterBaseClass tearDown
         self.get_super_method(self, 'tearDown')()
@@ -93,7 +87,6 @@ class VolumeAccessibilityTests(GlusterBaseClass):
         start of the volume.
         """
         # Verify volume processes are online
-        g.log.info("Verify volume %s processes are online", self.volname)
         ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
         self.assertTrue(ret, ("Volume %s : All process are not online"
                               % self.volname))
@@ -101,27 +94,21 @@ class VolumeAccessibilityTests(GlusterBaseClass):
                    self.volname)
 
         # Stop Volume
-        g.log.info("Stopping Volume %s", self.volname)
         ret, _, _ = volume_stop(self.mnode, self.volname, force=True)
         self.assertEqual(ret, 0, "Failed to stop volume %s" % self.volname)
         g.log.info("Successfully stopped volume %s", self.volname)
 
         # Start Volume
-        g.log.info("Starting Volume %s", self.volname)
         ret, _, _ = volume_start(self.mnode, self.volname)
         self.assertEqual(ret, 0, "Failed to start volume %s" % self.volname)
         g.log.info("Successfully started volume %s", self.volname)
 
         # Wait for volume processes to be online
-        g.log.info("Wait for volume processes to be online")
         ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
         self.assertTrue(ret, ("Failed to wait for volume %s processes to "
                               "be online", self.volname))
-        g.log.info("Successful in waiting for volume %s processes to be "
-                   "online", self.volname)
 
         # Log Volume Info and Status
-        g.log.info("Logging Volume %s Info and Status", self.volname)
         ret = log_volume_info_and_status(self.mnode, self.volname)
         self.assertTrue(ret, ("Failed to Log volume %s info and status",
                               self.volname))
@@ -129,7 +116,6 @@ class VolumeAccessibilityTests(GlusterBaseClass):
                    self.volname)
 
         # Verify volume's all process are online
-        g.log.info("Verify volume %s processes are online", self.volname)
         ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
         self.assertTrue(ret, ("Volume %s : All process are not online"
                               % self.volname))
@@ -137,7 +123,6 @@ class VolumeAccessibilityTests(GlusterBaseClass):
                    self.volname)
 
         # Log Volume Info and Status
-        g.log.info("Logging Volume %s Info and Status", self.volname)
         ret = log_volume_info_and_status(self.mnode, self.volname)
         self.assertTrue(ret, ("Failed to Log volume %s info and status",
                               self.volname))
@@ -145,8 +130,6 @@ class VolumeAccessibilityTests(GlusterBaseClass):
                    self.volname)
 
         # Check if glusterd is running on all servers(expected: active)
-        g.log.info("Check if glusterd is running on all servers"
-                   "(expected: active)")
         ret = is_glusterd_running(self.servers)
         self.assertEqual(ret, 0, "Glusterd is not running on all servers")
         g.log.info("Glusterd is running on all the servers")
@@ -156,10 +139,8 @@ class VolumeAccessibilityTests(GlusterBaseClass):
         """Test File Directory Creation on the volume.
         """
         # Mount Volume
-        g.log.info("Starting to Mount Volume %s", self.volname)
         ret = self.mount_volume(self.mounts)
         self.assertTrue(ret, ("Failed to Mount Volume %s", self.volname))
-        g.log.info("Successful in Mounting Volume %s", self.volname)
 
         # Start IO on all mounts.
         all_mounts_procs = []
@@ -178,22 +159,16 @@ class VolumeAccessibilityTests(GlusterBaseClass):
             proc = g.run_async(mount_obj.client_system, cmd,
                                user=mount_obj.user)
             all_mounts_procs.append(proc)
-            count = count + 10
+            count += 10
 
         # Validate IO
-        g.log.info("Validating IO's")
         ret = validate_io_procs(all_mounts_procs, self.mounts)
         self.assertTrue(ret, "IO failed on some of the clients")
-        g.log.info("Successfully validated all io's")
 
         # Get stat of all the files/dirs created.
-        g.log.info("Get stat of all the files/dirs created.")
         ret = get_mounts_stat(self.mounts)
         self.assertTrue(ret, "Stat failed on some of the clients")
-        g.log.info("Successfully got stat of all files/dirs created")
 
         # UnMount Volume
-        g.log.info("Starting to Unmount Volume %s", self.volname)
         ret = self.unmount_volume(self.mounts)
         self.assertTrue(ret, ("Failed to Unmount Volume %s" % self.volname))
-        g.log.info("Successfully Unmounted Volume %s", self.volname)
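Finally, test_vvt.py's stop/start accessibility check reduces to the sketch below, built from the volume_ops and volume_libs helpers the hunks above reference (an illustrative wrapper, not part of the patch):

    # Stop and restart a volume, then confirm all its processes return.
    from glustolibs.gluster.volume_libs import (
        verify_all_process_of_volume_are_online,
        wait_for_volume_process_to_be_online)
    from glustolibs.gluster.volume_ops import volume_start, volume_stop

    def volume_survives_restart(mnode, volname):
        ret, _, _ = volume_stop(mnode, volname, force=True)
        if ret != 0:
            return False
        ret, _, _ = volume_start(mnode, volname)
        if ret != 0:
            return False
        # Give brick and auxiliary daemons time to come back up
        if not wait_for_volume_process_to_be_online(mnode, volname):
            return False
        return verify_all_process_of_volume_are_online(mnode, volname)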