From 1945bf99c2bd7f7552ed5c72924074f86fe81a7f Mon Sep 17 00:00:00 2001
From: vamahaja
Date: Thu, 17 Sep 2020 10:45:02 +0530
Subject: [TestFix][Tier3] Add pytest marker for 'tier3' tests

Change-Id: Idd21f2d0cfb2a1171e4e8464646684ac9164ad0e
Signed-off-by: vamahaja
---
 tests/functional/arbiter/test_arbiter.py             |  6 +++---
 .../test_gluster_block_stability.py                  |  6 +++---
 tests/functional/heketi/test_block_volumes_heketi.py |  2 +-
 tests/functional/heketi/test_disabling_device.py     |  2 +-
 tests/functional/heketi/test_heketi_create_volume.py |  2 +-
 .../heketi/test_heketi_device_operations.py          |  2 +-
 tests/functional/heketi/test_heketi_zones.py         | 20 ++++++++++----------
 tests/functional/logging/test_logging_validations.py |  4 ++--
 .../provisioning/test_dynamic_provisioning_block.py  |  8 ++++----
 tests/functional/provisioning/test_pv_resize.py      |  4 ++--
 10 files changed, 28 insertions(+), 28 deletions(-)
(limited to 'tests/functional')

diff --git a/tests/functional/arbiter/test_arbiter.py b/tests/functional/arbiter/test_arbiter.py
index 884e7c4b..a1ec544a 100755
--- a/tests/functional/arbiter/test_arbiter.py
+++ b/tests/functional/arbiter/test_arbiter.py
@@ -395,7 +395,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
                 "Arbiter brick '%s' was not verified. Looks like it was "
                 "not found on any of gluster PODs/nodes." % brick["name"])

-    @pytest.mark.tier1
+    @pytest.mark.tier3
     @ddt.data(
         (False, False, True, True),
         (True, True, False, False),
@@ -483,7 +483,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
             self.assertIn(
                 data_brick.split(':')[0], data_nodes_ip_addresses)

-    @pytest.mark.tier2
+    @pytest.mark.tier3
     def test_create_delete_pvcs_to_make_gluster_reuse_released_space(self):
         """Validate reuse of volume space after deletion of PVCs"""
         min_storage_gb = 10
@@ -909,7 +909,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
                        "{}".format(arbiter_brick_ip))
             self.assertGreaterEqual(len(arbiter_brick_ip), 1, err_msg)

-    @pytest.mark.tier1
+    @pytest.mark.tier3
     @podcmd.GlustoPod()
     def test_arbiter_volume_delete_using_pvc_mounted_on_app_pod(self):
         """Test Arbiter volume delete using a pvc when volume is mounted
diff --git a/tests/functional/gluster_stability/test_gluster_block_stability.py b/tests/functional/gluster_stability/test_gluster_block_stability.py
index be2f2428..1f09fb97 100644
--- a/tests/functional/gluster_stability/test_gluster_block_stability.py
+++ b/tests/functional/gluster_stability/test_gluster_block_stability.py
@@ -946,7 +946,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
             self.heketi_client_node, self.heketi_server_url)
         self.assertNotIn(vol_name_prefix, h_vol_list)

-    @pytest.mark.tier1
+    @pytest.mark.tier3
     def test_path_failures_on_initiator_node_migration_and_pod_restart(self):
         """Verify path failures on initiator node migration and app pod
         restart. Also, make sure that existing
@@ -1511,7 +1511,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
                        .format(final_free_storage, initial_free_storage))
             raise AssertionError(err_msg)

-    @pytest.mark.tier1
+    @pytest.mark.tier3
     @podcmd.GlustoPod()
     def test_delete_block_device_pvc_while_io_in_progress(self):
         """Delete block device or pvc while io is in progress"""
@@ -1694,7 +1694,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
         self.assertNotIn(
             vol_id, blockvolume_list, msg % (vol_id, blockvolume_list))

-    @pytest.mark.tier2
+    @pytest.mark.tier3
     @podcmd.GlustoPod()
     def test_pvc_state_when_node_is_power_on_and_off(self):
         """Verify PVC gets bound after gluster node is powered off and on
diff --git a/tests/functional/heketi/test_block_volumes_heketi.py b/tests/functional/heketi/test_block_volumes_heketi.py
index cbea98ae..694a45ed 100644
--- a/tests/functional/heketi/test_block_volumes_heketi.py
+++ b/tests/functional/heketi/test_block_volumes_heketi.py
@@ -373,7 +373,7 @@ class TestBlockVolumeOps(GlusterBlockBaseClass):
         # Check if all blockhosting volumes are deleted from heketi
         self.assertFalse(new_bhv_list)

-    @pytest.mark.tier2
+    @pytest.mark.tier3
     @podcmd.GlustoPod()
     def test_targetcli_when_block_hosting_volume_down(self):
         """Validate no inconsistencies occur in targetcli when block volumes
diff --git a/tests/functional/heketi/test_disabling_device.py b/tests/functional/heketi/test_disabling_device.py
index 5c287d43..74a6c3f7 100644
--- a/tests/functional/heketi/test_disabling_device.py
+++ b/tests/functional/heketi/test_disabling_device.py
@@ -8,7 +8,7 @@ from openshiftstoragelibs import podcmd


 class TestDisableHeketiDevice(baseclass.BaseClass):
-    @pytest.mark.tier2
+    @pytest.mark.tier3
     @podcmd.GlustoPod()
     def test_create_volumes_enabling_and_disabling_heketi_devices(self):
         """Validate enable/disable of heketi device"""
diff --git a/tests/functional/heketi/test_heketi_create_volume.py b/tests/functional/heketi/test_heketi_create_volume.py
index f7fb6126..f061c423 100644
--- a/tests/functional/heketi/test_heketi_create_volume.py
+++ b/tests/functional/heketi/test_heketi_create_volume.py
@@ -298,7 +298,7 @@ class TestHeketiVolume(BaseClass):
             self.heketi_client_node, heketi_url, node_id, json=True)
         self.assertEqual(node_info['state'].lower(), 'online')

-    @pytest.mark.tier1
+    @pytest.mark.tier3
     def test_blockvolume_create_no_free_space(self):
         """Validate error is returned when free capacity is exhausted"""

diff --git a/tests/functional/heketi/test_heketi_device_operations.py b/tests/functional/heketi/test_heketi_device_operations.py
index c8a9b991..a6831e98 100755
--- a/tests/functional/heketi/test_heketi_device_operations.py
+++ b/tests/functional/heketi/test_heketi_device_operations.py
@@ -339,7 +339,7 @@ class TestHeketiDeviceOperations(BaseClass):
             is_delete_device, deleted_device, node_id, add_back_again,
             skip_cleanup_addition=True)

-    @pytest.mark.tier2
+    @pytest.mark.tier3
     def test_heketi_device_removal_with_insuff_space(self):
         """Validate heketi with device removal insufficient space"""

diff --git a/tests/functional/heketi/test_heketi_zones.py b/tests/functional/heketi/test_heketi_zones.py
index a5e6fd3b..e1cd5dd4 100644
--- a/tests/functional/heketi/test_heketi_zones.py
+++ b/tests/functional/heketi/test_heketi_zones.py
@@ -227,7 +227,7 @@ class TestHeketiZones(baseclass.BaseClass):
         new_env_list = command.cmd_run(cmd_list_env, hostname=self.node)
         self.assertIn(env, new_env_list, "Failed to set env {}".format(env))

-    @pytest.mark.tier1
+    @pytest.mark.tier3
     @ddt.data(
         (1, "none"),
         (2, "none"),
@@ -282,7 +282,7 @@ class TestHeketiZones(baseclass.BaseClass):
         # Create app DC with the above PVC
         self.create_dc_with_pvc(pvc_name, timeout=120, wait_step=3)

-    @pytest.mark.tier1
+    @pytest.mark.tier3
     @ddt.data(
         (1, "none"),
         (2, "none"),
@@ -341,7 +341,7 @@ class TestHeketiZones(baseclass.BaseClass):
         # Create app DC with the above PVC
         self.create_dc_with_pvc(pvc_name, timeout=120, wait_step=3)

-    @pytest.mark.tier1
+    @pytest.mark.tier3
     @ddt.data(
         (3, "strict"),
         (1, "none"),
@@ -379,7 +379,7 @@ class TestHeketiZones(baseclass.BaseClass):
         # Create app DC with the above PVC
         self.create_dc_with_pvc(pvc_name, timeout=120, wait_step=3)

-    @pytest.mark.tier1
+    @pytest.mark.tier3
     @ddt.data(
         (3, "strict"),
         (1, "none"),
@@ -419,7 +419,7 @@ class TestHeketiZones(baseclass.BaseClass):
         # Create app DC with the above PVC
         self.create_dc_with_pvc(pvc_name, timeout=120, wait_step=3)

-    @pytest.mark.tier1
+    @pytest.mark.tier3
     @ddt.data(3, 4)
     def test_pvc_placement_with_zone_check_set_in_dc(self, zone_count):
         heketi_zone_checking = "strict"
@@ -447,7 +447,7 @@ class TestHeketiZones(baseclass.BaseClass):
         # Create app DC with the above PVC
         self.create_dc_with_pvc(pvc_name, timeout=120, wait_step=3)

-    @pytest.mark.tier1
+    @pytest.mark.tier3
     @ddt.data(3, 4)
     def test_check_arbiter_pvc_placement_zone_check_in_dc(self, zone_count):
         heketi_zone_checking = "strict"
@@ -483,7 +483,7 @@ class TestHeketiZones(baseclass.BaseClass):
         # Create app DC with the above PVC
         self.create_dc_with_pvc(pvc_name, timeout=120, wait_step=3)

-    @pytest.mark.tier1
+    @pytest.mark.tier3
     @ddt.data(
         (1, False),
         (1, True),
@@ -526,7 +526,7 @@ class TestHeketiZones(baseclass.BaseClass):
         # Create app DC with the above PVC
         self.create_dc_with_pvc(pvc_name, timeout=120, wait_step=3)

-    @pytest.mark.tier1
+    @pytest.mark.tier3
     @ddt.data(
         ("strict", "strict"),
         ("none", "strict"),
@@ -643,7 +643,7 @@ class TestHeketiZones(baseclass.BaseClass):

         return app_pods

-    @pytest.mark.tier1
+    @pytest.mark.tier3
     @ddt.data(
         (3, False),
         (3, True),
@@ -733,7 +733,7 @@ class TestHeketiZones(baseclass.BaseClass):
             openshift_ops.wait_for_pod_be_ready(
                 self.node, pod_name, timeout=5, wait_step=2)

-    @pytest.mark.tier1
+    @pytest.mark.tier3
     @ddt.data(
         (3, False),
         (3, True),
diff --git a/tests/functional/logging/test_logging_validations.py b/tests/functional/logging/test_logging_validations.py
index 05a92fbc..8c5d31fb 100644
--- a/tests/functional/logging/test_logging_validations.py
+++ b/tests/functional/logging/test_logging_validations.py
@@ -110,7 +110,7 @@ class TestLoggingAndGlusterRegistryValidation(GlusterBlockBaseClass):
         g_new_pod = self._get_newly_deployed_gluster_pod(g_pod_list_before)
         openshift_ops.wait_for_pod_be_ready(self._master, g_new_pod[0])

-    @pytest.mark.tier2
+    @pytest.mark.tier3
     def test_validate_logging_pods_and_pvc(self):
         """Validate metrics pods and PVC"""

@@ -143,7 +143,7 @@ class TestLoggingAndGlusterRegistryValidation(GlusterBlockBaseClass):
             heketi_server_url=self._registry_heketi_server_url,
             is_registry_gluster=True)

-    @pytest.mark.tier2
+    @pytest.mark.tier3
     def test_logging_es_pod_pvc_all_freespace_utilization(self):
         """Validate logging by utilizing all the free space of block PVC
         bound to elsaticsearch pod"""
diff --git a/tests/functional/provisioning/test_dynamic_provisioning_block.py b/tests/functional/provisioning/test_dynamic_provisioning_block.py
index 215354da..6f25ea47 100755
--- a/tests/functional/provisioning/test_dynamic_provisioning_block.py
+++ b/tests/functional/provisioning/test_dynamic_provisioning_block.py
@@ -467,7 +467,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
                 "only %s free space is available"
                 % (free_space, free_size))

-    @pytest.mark.tier1
+    @pytest.mark.tier3
     def test_creation_of_block_vol_greater_than_the_default_size_of_BHV_neg(
             self):
         """Verify that block volume creation fails when we create block
@@ -523,7 +523,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):

         verify_pvc_status_is_bound(self.node, pvc_name)

-    @pytest.mark.tier1
+    @pytest.mark.tier3
     def test_creation_of_block_vol_greater_than_the_default_size_of_BHV_pos(
             self):
         """Verify that block volume creation succeed when we create BHV
@@ -750,7 +750,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
         # Wait for all the PVCs to be in bound state
         wait_for_pvcs_be_bound(self.node, pvc_names, timeout=300, wait_step=5)

-    @pytest.mark.tier2
+    @pytest.mark.tier3
     def test_creation_of_pvc_when_one_node_is_down(self):
         """Test PVC creation when one node is down than hacount"""
         node_count = len(self.gluster_servers)
@@ -800,7 +800,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
         sc_name = self.create_storage_class(hacount=(node_count - 1))
         self.create_and_wait_for_pvc(sc_name=sc_name)

-    @pytest.mark.tier1
+    @pytest.mark.tier3
     def test_heketi_block_volume_create_with_size_more_than_bhv_free_space(
             self):
         """ Test to create heketi block volume of size greater than
diff --git a/tests/functional/provisioning/test_pv_resize.py b/tests/functional/provisioning/test_pv_resize.py
index a7baca00..dacdd992 100644
--- a/tests/functional/provisioning/test_pv_resize.py
+++ b/tests/functional/provisioning/test_pv_resize.py
@@ -238,7 +238,7 @@ class TestPvResizeClass(BaseClass):
         self.assertEqual(
             ret, 0, "Failed to write data on the expanded PVC")

-    @pytest.mark.tier2
+    @pytest.mark.tier3
     def test_pv_resize_no_free_space(self):
         """Validate PVC resize fails if there is no free space available"""
         if not self.is_containerized_gluster():
@@ -392,7 +392,7 @@ class TestPvResizeClass(BaseClass):
             raise ExecutionError("Failed to run io, error {}".format(str(err)))

     @skip("Blocked by BZ-1547069")
-    @pytest.mark.tier2
+    @pytest.mark.tier3
     def test_pvc_resize_size_greater_than_available_space(self):
         """Re-size PVC to greater value than available volume size and then
         expand volume to support maximum size.
--
cgit