From 1f952c96eba1f920a85d00b310028a5f854e2c3d Mon Sep 17 00:00:00 2001 From: Valerii Ponomarov Date: Wed, 28 Nov 2018 01:47:06 +0530 Subject: Move functions with creation of SC, PVC and POD objects to common place Removing duplication of code. Also, reuse it in places where it is already duplicated. Change-Id: I2f88b4921cb7bec01d17e1ddeaeda16e0d198493 --- tests/functional/common/arbiter/test_arbiter.py | 62 ++------ .../test_dynamic_provisioning_block_p0_cases.py | 161 +++++---------------- .../test_dynamic_provisioning_p0_cases.py | 146 ++++--------------- .../common/provisioning/test_pv_resize.py | 63 ++------ 4 files changed, 90 insertions(+), 342 deletions(-) (limited to 'tests') diff --git a/tests/functional/common/arbiter/test_arbiter.py b/tests/functional/common/arbiter/test_arbiter.py index 1cd7d134..2d67db16 100644 --- a/tests/functional/common/arbiter/test_arbiter.py +++ b/tests/functional/common/arbiter/test_arbiter.py @@ -6,8 +6,6 @@ from cnslibs.common.openshift_ops import ( get_gluster_vol_info_by_pvc_name, get_ocp_gluster_pod_names, oc_create_pvc, - oc_create_sc, - oc_create_secret, oc_create_tiny_pod_with_volume, oc_delete, verify_pvc_status_is_bound, @@ -79,49 +77,14 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass): self.addCleanup(heketi_ops.rm_arbiter_tag, node, server_url, source, source_id) - def _create_storage_class(self, avg_file_size=None): - # Create secret file for usage in storage class - self.secret_name = oc_create_secret( - self.node, namespace=self.sc.get('secretnamespace', 'default'), - data_key=self.heketi_cli_key, - secret_type=self.sc.get('provisioner', 'kubernetes.io/glusterfs')) - self.addCleanup( - oc_delete, self.node, 'secret', self.secret_name) - - vol_options = "user.heketi.arbiter true" - if avg_file_size: - vol_options += ",user.heketi.average-file-size %s" % avg_file_size - - # Create storage class - self.sc_name = oc_create_sc( - self.node, resturl=self.sc['resturl'], - restuser=self.sc['restuser'], - secretnamespace=self.sc['secretnamespace'], - secretname=self.secret_name, - volumeoptions=vol_options, - ) - self.addCleanup(oc_delete, self.node, 'sc', self.sc_name) - - def _create_and_wait_for_pvc(self, pvc_size=1): - # Create PVC - self.pvc_name = oc_create_pvc( - self.node, self.sc_name, pvc_name_prefix='arbiter-pvc', - pvc_size=pvc_size) - self.addCleanup( - wait_for_resource_absence, self.node, 'pvc', self.pvc_name) - self.addCleanup(oc_delete, self.node, 'pvc', self.pvc_name) - - # Wait for PVC to be in bound state - verify_pvc_status_is_bound(self.node, self.pvc_name) - def test_arbiter_pvc_create(self): """Test case CNS-944""" # Create sc with gluster arbiter info - self._create_storage_class() + self.create_storage_class(is_arbiter_vol=True) # Create PVC and wait for it to be in 'Bound' state - self._create_and_wait_for_pvc() + self.create_and_wait_for_pvc() # Get vol info vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name) @@ -150,10 +113,10 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass): """Test case CNS-945""" # Create sc with gluster arbiter info - self._create_storage_class() + self.create_storage_class(is_arbiter_vol=True) # Create PVC and wait for it to be in 'Bound' state - self._create_and_wait_for_pvc() + self.create_and_wait_for_pvc() # Create POD with attached volume mount_path = "/mnt" @@ -278,7 +241,7 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass): break # Create sc with gluster arbiter info - self._create_storage_class() + 
self.create_storage_class(is_arbiter_vol=True) # Create helper arbiter vol if not all the data devices have # half of required free space. @@ -308,7 +271,7 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass): self.heketi_client_node, self.heketi_server_url, smaller_device_id) self.assertTrue(out) - self._create_and_wait_for_pvc( + self.create_and_wait_for_pvc( int(helper_vol_size_kb / 1024.0**2) + 1) finally: out = heketi_ops.heketi_device_enable( @@ -317,7 +280,7 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass): self.assertTrue(out) # Create target arbiter volume - self._create_and_wait_for_pvc(int(target_vol_size_kb / 1024.0**2)) + self.create_and_wait_for_pvc(int(target_vol_size_kb / 1024.0**2)) # Get gluster volume info vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name) @@ -361,10 +324,11 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass): """Test cases CNS-1182-1190""" # Create sc with gluster arbiter info - self._create_storage_class(avg_file_size) + self.create_storage_class( + is_arbiter_vol=True, arbiter_avg_file_size=avg_file_size) # Create PVC and wait for it to be in 'Bound' state - self._create_and_wait_for_pvc(pvc_size_gb) + self.create_and_wait_for_pvc(pvc_size_gb) # Get volume info vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name) @@ -479,9 +443,9 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass): data_nodes.append(node_info) # Create PVCs and check that their bricks are correctly located - self._create_storage_class() + self.create_storage_class(is_arbiter_vol=True) for i in range(pvc_amount): - self._create_and_wait_for_pvc(1) + self.create_and_wait_for_pvc(1) # Get gluster volume info vol_info = get_gluster_vol_info_by_pvc_name( @@ -556,7 +520,7 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass): pvc_amount = max([len(n['devices']) for n in data_nodes]) + 1 # Create sc with gluster arbiter info - self._create_storage_class() + self.create_storage_class(is_arbiter_vol=True) # Create and delete 3 small volumes concurrently pvc_names = [] diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py index 81fec14e..5edbdc50 100644 --- a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py +++ b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py @@ -9,9 +9,6 @@ from cnslibs.common.openshift_ops import ( get_pv_name_from_pvc, get_pvc_status, oc_create_app_dc_with_io, - oc_create_secret, - oc_create_sc, - oc_create_pvc, oc_delete, oc_get_custom_resource, oc_rsh, @@ -37,108 +34,18 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass): def setUp(self): super(TestDynamicProvisioningBlockP0, self).setUp() self.node = self.ocp_master_node[0] - self.sc = self.cns_storage_class.get( - 'storage_class2', - self.cns_storage_class.get('block_storage_class')) - - def _create_storage_class(self, hacount=True, create_name_prefix=False, - reclaim_policy="Delete"): - # Create secret file - self.secret_name = oc_create_secret( - self.node, - namespace=self.sc.get('restsecretnamespace', 'default'), - data_key=self.heketi_cli_key, - secret_type=self.sc.get('provisioner', 'gluster.org/glusterblock')) - self.addCleanup(oc_delete, self.node, 'secret', self.secret_name) - - # create storage class - kwargs = { - "provisioner": "gluster.org/glusterblock", - "resturl": 
self.sc['resturl'], - "restuser": self.sc['restuser'], - "restsecretnamespace": self.sc['restsecretnamespace'], - "restsecretname": self.secret_name - } - if hacount: - kwargs["hacount"] = self.sc['hacount'] - if create_name_prefix: - kwargs["volumenameprefix"] = self.sc.get( - 'volumenameprefix', 'autotest-blk') - - self.sc_name = oc_create_sc( - self.node, reclaim_policy=reclaim_policy, **kwargs) - self.addCleanup(oc_delete, self.node, 'sc', self.sc_name) - - return self.sc_name - - def _create_and_wait_for_pvcs(self, pvc_size=1, - pvc_name_prefix='autotests-block-pvc', - pvc_amount=1): - # Create PVCs - pvc_names = [] - for i in range(pvc_amount): - pvc_name = oc_create_pvc( - self.node, self.sc_name, pvc_name_prefix=pvc_name_prefix, - pvc_size=pvc_size) - pvc_names.append(pvc_name) - self.addCleanup( - wait_for_resource_absence, self.node, 'pvc', pvc_name) - - # Wait for PVCs to be in bound state - try: - for pvc_name in pvc_names: - verify_pvc_status_is_bound(self.node, pvc_name) - finally: - reclaim_policy = oc_get_custom_resource( - self.node, 'sc', ':.reclaimPolicy', self.sc_name)[0] - - for pvc_name in pvc_names: - if reclaim_policy == 'Retain': - pv_name = get_pv_name_from_pvc(self.node, pvc_name) - self.addCleanup(oc_delete, self.node, 'pv', pv_name, - raise_on_absence=False) - custom = (r':.metadata.annotations."gluster\.kubernetes' - r'\.io\/heketi\-volume\-id"') - vol_id = oc_get_custom_resource( - self.node, 'pv', custom, pv_name)[0] - self.addCleanup(heketi_blockvolume_delete, - self.heketi_client_node, - self.heketi_server_url, vol_id) - self.addCleanup(oc_delete, self.node, 'pvc', pvc_name, - raise_on_absence=False) - - return pvc_names - - def _create_and_wait_for_pvc(self, pvc_size=1, - pvc_name_prefix='autotests-block-pvc'): - self.pvc_name = self._create_and_wait_for_pvcs( - pvc_size=pvc_size, pvc_name_prefix=pvc_name_prefix)[0] - return self.pvc_name - - def _create_dc_with_pvc(self, hacount=True, create_name_prefix=False): - # Create storage class and secret objects - self._create_storage_class( - hacount, create_name_prefix=create_name_prefix) - - # Create PVC - pvc_name = self._create_and_wait_for_pvc() - - # Create DC with POD and attached PVC to it - dc_name = oc_create_app_dc_with_io(self.node, pvc_name) - self.addCleanup(oc_delete, self.node, 'dc', dc_name) - self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0) - pod_name = get_pod_name_from_dc(self.node, dc_name) - wait_for_pod_be_ready(self.node, pod_name) - - return dc_name, pod_name, pvc_name def dynamic_provisioning_glusterblock( - self, hacount=True, create_name_prefix=False): + self, set_hacount, create_vol_name_prefix=False): datafile_path = '/mnt/fake_file_for_%s' % self.id() # Create DC with attached PVC - dc_name, pod_name, pvc_name = self._create_dc_with_pvc( - hacount, create_name_prefix=create_name_prefix) + sc_name = self.create_storage_class( + set_hacount=set_hacount, + create_vol_name_prefix=create_vol_name_prefix) + pvc_name = self.create_and_wait_for_pvc( + pvc_name_prefix='autotest-block', sc_name=sc_name) + dc_name, pod_name = self.create_dc_with_pvc(pvc_name) # Check that we can write data for cmd in ("dd if=/dev/urandom of=%s bs=1K count=100", @@ -152,18 +59,20 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass): def test_dynamic_provisioning_glusterblock_hacount_true(self): """ CNS-435 dynamic provisioning glusterblock """ - self.dynamic_provisioning_glusterblock() + self.dynamic_provisioning_glusterblock(set_hacount=True) def 
test_dynamic_provisioning_glusterblock_hacount_false(self): """ CNS-716 storage-class mandatory parameters for block """ - self.dynamic_provisioning_glusterblock(hacount=False) + self.dynamic_provisioning_glusterblock(set_hacount=False) def test_dynamic_provisioning_glusterblock_heketipod_failure(self): datafile_path = '/mnt/fake_file_for_%s' % self.id() # Create DC with attached PVC - app_1_dc_name, app_1_pod_name, app_1_pvc_name = ( - self._create_dc_with_pvc()) + sc_name = self.create_storage_class() + app_1_pvc_name = self.create_and_wait_for_pvc( + pvc_name_prefix='autotest-block', sc_name=sc_name) + app_1_dc_name, app_1_pod_name = self.create_dc_with_pvc(app_1_pvc_name) # Write test data write_data_cmd = ( @@ -185,12 +94,8 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass): wait_for_resource_absence(self.node, 'pod', heketi_pod_name) # Create second PVC - app_2_pvc_name = oc_create_pvc( - self.node, self.sc_name, pvc_name_prefix='autotests-block-pvc', - pvc_size=1) - self.addCleanup( - wait_for_resource_absence, self.node, 'pvc', app_2_pvc_name) - self.addCleanup(oc_delete, self.node, 'pvc', app_2_pvc_name) + app_2_pvc_name = self.create_and_wait_for_pvc( + pvc_name_prefix='autotests-block2') # Check status of the second PVC after small pause time.sleep(2) @@ -232,7 +137,10 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass): datafile_path = '/mnt/fake_file_for_%s' % self.id() # Create DC with attached PVC - dc_name, pod_name, pvc_name = self._create_dc_with_pvc() + sc_name = self.create_storage_class() + pvc_name = self.create_and_wait_for_pvc( + pvc_name_prefix='autotest-block', sc_name=sc_name) + dc_name, pod_name = self.create_dc_with_pvc(pvc_name) # Run IO in background io_cmd = "oc rsh %s dd if=/dev/urandom of=%s bs=1000K count=900" % ( @@ -278,11 +186,9 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass): gb_prov_name, gb_prov_status = out.split() self.assertEqual(gb_prov_status, 'Running') - # Create storage class and secret objects - self._create_storage_class() - - # Create PVC - self._create_and_wait_for_pvc() + # Create Secret, SC and PVC + self.create_storage_class() + self.create_and_wait_for_pvc() # Get list of Gluster PODs g_pod_list_cmd = ( @@ -313,10 +219,9 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass): def test_dynamic_provisioning_glusterblock_heketidown_pvc_delete(self): """ Delete PVC's when heketi is down CNS-439 """ - # Create storage class and secret objects - self._create_storage_class() - - self.pvc_name_list = self._create_and_wait_for_pvcs( + # Create Secret, SC and PVCs + self.create_storage_class() + self.pvc_name_list = self.create_and_wait_for_pvcs( 1, 'pvc-heketi-down', 3) # remove heketi-pod @@ -347,14 +252,17 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass): interval=1, timeout=120) # create a new PVC - self._create_and_wait_for_pvc() + self.create_and_wait_for_pvc() def test_recreate_app_pod_with_attached_block_pv(self): """Test Case CNS-1392""" datafile_path = '/mnt/temporary_test_file' # Create DC with POD and attached PVC to it - dc_name, pod_name, pvc_name = self._create_dc_with_pvc() + sc_name = self.create_storage_class() + pvc_name = self.create_and_wait_for_pvc( + pvc_name_prefix='autotest-block', sc_name=sc_name) + dc_name, pod_name = self.create_dc_with_pvc(pvc_name) # Write data write_cmd = "oc exec %s -- dd if=/dev/urandom of=%s bs=4k count=10000" @@ -377,7 +285,8 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass): def 
test_volname_prefix_glusterblock(self): # CNS-926 - custom_volname_prefix_blockvol - self.dynamic_provisioning_glusterblock(create_name_prefix=True) + self.dynamic_provisioning_glusterblock( + set_hacount=False, create_vol_name_prefix=True) pv_name = get_pv_name_from_pvc(self.node, self.pvc_name) vol_name = oc_get_custom_resource( @@ -390,13 +299,13 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass): self.assertIn(vol_name, block_vol_list) self.assertTrue(vol_name.startswith( - self.sc.get('volumenameprefix', 'autotest-blk'))) + self.sc.get('volumenameprefix', 'autotest'))) def test_dynamic_provisioning_glusterblock_reclaim_policy_retain(self): # CNS-1391 - Retain policy - gluster-block - delete pvc - self._create_storage_class(reclaim_policy='Retain') - self._create_and_wait_for_pvc() + self.create_storage_class(reclaim_policy='Retain') + self.create_and_wait_for_pvc() dc_name = oc_create_app_dc_with_io(self.node, self.pvc_name) diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py index 6d789aa3..ebc33665 100644 --- a/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py +++ b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py @@ -13,8 +13,8 @@ from cnslibs.common.openshift_ops import ( get_pod_names_from_dc, oc_create_secret, oc_create_sc, - oc_create_pvc, oc_create_app_dc_with_io, + oc_create_pvc, oc_create_tiny_pod_with_volume, oc_delete, oc_get_custom_resource, @@ -40,85 +40,14 @@ class TestDynamicProvisioningP0(CnsBaseClass): def setUp(self): super(TestDynamicProvisioningP0, self).setUp() self.node = self.ocp_master_node[0] - self.sc = self.cns_storage_class.get( - 'storage_class1', self.cns_storage_class.get('file_storage_class')) - - def _create_storage_class( - self, create_name_prefix=False, reclaim_policy='Delete'): - - # Create secret file for usage in storage class - self.secret_name = oc_create_secret( - self.node, - namespace=self.sc.get('secretnamespace', 'default'), - data_key=self.heketi_cli_key, - secret_type=self.sc.get('provisioner', 'kubernetes.io/glusterfs')) - self.addCleanup( - oc_delete, self.node, 'secret', self.secret_name) - # Create storage class - self.sc_name = oc_create_sc( - self.node, - reclaim_policy=reclaim_policy, - resturl=self.sc['resturl'], - restuser=self.sc['restuser'], - secretnamespace=self.sc['secretnamespace'], - secretname=self.secret_name, - **({"volumenameprefix": self.sc['volumenameprefix']} - if create_name_prefix else {}) - ) - self.addCleanup(oc_delete, self.node, 'sc', self.sc_name) - - def _create_and_wait_for_pvcs(self, pvc_size=1, - pvc_name_prefix='autotests-pvc', - pvc_amount=1): - # Create PVCs - pvc_names = [] - for i in range(pvc_amount): - pvc_name = oc_create_pvc( - self.node, self.sc_name, pvc_name_prefix=pvc_name_prefix, - pvc_size=pvc_size) - pvc_names.append(pvc_name) - self.addCleanup( - wait_for_resource_absence, self.node, 'pvc', pvc_name) - - # Wait for PVCs to be in bound state - try: - for pvc_name in pvc_names: - verify_pvc_status_is_bound(self.node, pvc_name) - finally: - reclaim_policy = oc_get_custom_resource( - self.node, 'sc', ':.reclaimPolicy', self.sc_name)[0] - - for pvc_name in pvc_names: - if reclaim_policy == 'Retain': - pv_name = get_pv_name_from_pvc(self.node, pvc_name) - self.addCleanup(oc_delete, self.node, 'pv', pv_name, - raise_on_absence=False) - custom = (r':.metadata.annotations."gluster\.kubernetes' - 
r'\.io\/heketi\-volume\-id"') - vol_id = oc_get_custom_resource( - self.node, 'pv', custom, pv_name)[0] - self.addCleanup(heketi_volume_delete, - self.heketi_client_node, - self.heketi_server_url, vol_id, - raise_on_error=False) - self.addCleanup(oc_delete, self.node, 'pvc', pvc_name, - raise_on_absence=False) - - return pvc_names - - def _create_and_wait_for_pvc(self, pvc_size=1, - pvc_name_prefix='autotests-pvc'): - self.pvc_name = self._create_and_wait_for_pvcs( - pvc_size=pvc_size, pvc_name_prefix=pvc_name_prefix)[0] - return self.pvc_name - - def dynamic_provisioning_glusterfile(self, heketi_volname_prefix=False): + def dynamic_provisioning_glusterfile(self, create_vol_name_prefix): # Create secret and storage class - self._create_storage_class(heketi_volname_prefix) + self.create_storage_class( + create_vol_name_prefix=create_vol_name_prefix) # Create PVC - pvc_name = self._create_and_wait_for_pvc() + pvc_name = self.create_and_wait_for_pvc() # Create DC with POD and attached PVC to it. dc_name = oc_create_app_dc_with_io(self.node, pvc_name) @@ -129,7 +58,7 @@ class TestDynamicProvisioningP0(CnsBaseClass): wait_for_pod_be_ready(self.node, pod_name) # Verify Heketi volume name for prefix presence if provided - if heketi_volname_prefix: + if create_vol_name_prefix: ret = verify_volume_name_prefix(self.node, self.sc['volumenameprefix'], self.sc['secretnamespace'], @@ -160,10 +89,10 @@ class TestDynamicProvisioningP0(CnsBaseClass): datafile_path = '%s/fake_file_for_%s' % (mount_path, self.id()) # Create secret and storage class - self._create_storage_class() + self.create_storage_class() # Create PVC - app_1_pvc_name = self._create_and_wait_for_pvc() + app_1_pvc_name = self.create_and_wait_for_pvc() # Create app POD with attached volume app_1_pod_name = oc_create_tiny_pod_with_volume( @@ -197,12 +126,7 @@ class TestDynamicProvisioningP0(CnsBaseClass): wait_for_resource_absence(self.node, 'pod', heketi_pod_name) # Create second PVC - app_2_pvc_name = oc_create_pvc( - self.node, self.sc_name, pvc_name_prefix='autotests-pvc', - pvc_size=1) - self.addCleanup( - wait_for_resource_absence, self.node, 'pvc', app_2_pvc_name) - self.addCleanup(oc_delete, self.node, 'pvc', app_2_pvc_name) + app_2_pvc_name = self.create_and_wait_for_pvc() # Check status of the second PVC after small pause time.sleep(2) @@ -246,10 +170,10 @@ class TestDynamicProvisioningP0(CnsBaseClass): datafile_path = '%s/fake_file_for_%s' % (mount_path, self.id()) # Create secret and storage class - self._create_storage_class() + self.create_storage_class() # Create PVC - pvc_name = self._create_and_wait_for_pvc() + pvc_name = self.create_and_wait_for_pvc() # Create app POD with attached volume pod_name = oc_create_tiny_pod_with_volume( @@ -309,19 +233,16 @@ class TestDynamicProvisioningP0(CnsBaseClass): oc_delete, self.node, 'secret', self.secret_name) # create storage class with mandatory parameters only - self.sc_name = oc_create_sc( + sc_name = oc_create_sc( self.node, provisioner='kubernetes.io/glusterfs', resturl=self.sc['resturl'], restuser=self.sc['restuser'], secretnamespace=self.sc['secretnamespace'], secretname=self.secret_name ) - self.addCleanup(oc_delete, self.node, 'sc', self.sc_name) + self.addCleanup(oc_delete, self.node, 'sc', sc_name) # Create PVC - pvc_name = oc_create_pvc(self.node, self.sc_name) - self.addCleanup(wait_for_resource_absence, self.node, 'pvc', pvc_name) - self.addCleanup(oc_delete, self.node, 'pvc', pvc_name) - verify_pvc_status_is_bound(self.node, pvc_name) + pvc_name = 
self.create_and_wait_for_pvc(sc_name=sc_name) # Create DC with POD and attached PVC to it. dc_name = oc_create_app_dc_with_io(self.node, pvc_name) @@ -351,10 +272,9 @@ class TestDynamicProvisioningP0(CnsBaseClass): def test_dynamic_provisioning_glusterfile_heketidown_pvc_delete(self): """ Delete PVC's when heketi is down CNS-438 """ - # Create storage class and secret objects - self._create_storage_class() - - self.pvc_name_list = self._create_and_wait_for_pvcs( + # Create storage class, secret and PVCs + self.create_storage_class() + self.pvc_name_list = self.create_and_wait_for_pvcs( 1, 'pvc-heketi-down', 3) # remove heketi-pod @@ -385,17 +305,15 @@ class TestDynamicProvisioningP0(CnsBaseClass): interval=1, timeout=120) # create a new PVC - self._create_and_wait_for_pvc() + self.create_and_wait_for_pvc() def test_validate_pvc_in_multiple_app_pods(self): """Test case CNS-574""" replicas = 5 - # Create secret and storage class - self._create_storage_class() - # Create PVC - pvc_name = self._create_and_wait_for_pvc() + sc_name = self.create_storage_class() + pvc_name = self.create_and_wait_for_pvc(sc_name=sc_name) # Create DC with application PODs dc_name = oc_create_app_dc_with_io( @@ -419,20 +337,14 @@ class TestDynamicProvisioningP0(CnsBaseClass): self.assertIn("temp_%s" % pod_name, ls_out) def test_pvc_deletion_while_pod_is_running(self): - # CNS-584 Verify PVC deletion while pod is running - - self._create_storage_class() - self._create_and_wait_for_pvc() + """Test case CNS-584 - Verify PVC deletion while pod is running""" - # Create DC with POD and attached PVC to it. - dc_name = oc_create_app_dc_with_io(self.node, self.pvc_name) - self.addCleanup(oc_delete, self.node, 'dc', dc_name) - self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0) - - pod_name = get_pod_name_from_dc(self.node, dc_name) - wait_for_pod_be_ready(self.node, pod_name, timeout=300, wait_step=10) + # Create DC with POD and attached PVC to it + sc_name = self.create_storage_class() + pvc_name = self.create_and_wait_for_pvc(sc_name=sc_name) + dc_name, pod_name = self.create_dc_with_pvc(pvc_name) - # delete PVC + # Delete PVC oc_delete(self.node, 'pvc', self.pvc_name) with self.assertRaises(ExecutionError): @@ -450,8 +362,8 @@ class TestDynamicProvisioningP0(CnsBaseClass): def test_dynamic_provisioning_glusterfile_reclaim_policy_retain(self): # CNS-1390 - Retain policy - glusterfs - delete pvc - self._create_storage_class(reclaim_policy='Retain') - self._create_and_wait_for_pvc() + self.create_storage_class(reclaim_policy='Retain') + self.create_and_wait_for_pvc() # get the name of the volume pv_name = get_pv_name_from_pvc(self.node, self.pvc_name) @@ -524,7 +436,7 @@ class TestDynamicProvisioningP0(CnsBaseClass): # Create new SC prefix = "autotests-default-sc" - self._create_storage_class(prefix) + self.create_storage_class(sc_name_prefix=prefix) # Make new SC be the default one and sleep for 1 sec to avoid races self.cmd_run(set_sc_annotation_cmd % (self.sc_name, '', 'true')) diff --git a/tests/functional/common/provisioning/test_pv_resize.py b/tests/functional/common/provisioning/test_pv_resize.py index 5412b5fd..353aa220 100644 --- a/tests/functional/common/provisioning/test_pv_resize.py +++ b/tests/functional/common/provisioning/test_pv_resize.py @@ -7,16 +7,12 @@ from cnslibs.common.openshift_ops import ( get_pod_name_from_dc, get_pv_name_from_pvc, oc_create_app_dc_with_io, - oc_create_pvc, - oc_create_secret, - oc_create_sc, oc_delete, oc_rsh, oc_version, scale_dc_pod_amount_and_wait, 
verify_pv_size, verify_pvc_size, - verify_pvc_status_is_bound, wait_for_events, wait_for_pod_be_ready, wait_for_resource_absence) @@ -46,45 +42,19 @@ class TestPvResizeClass(CnsBaseClass): "version %s " % self.version) g.log.error(msg) raise self.skipTest(msg) - self.sc = self.cns_storage_class.get( - 'storage_class1', self.cns_storage_class.get('file_storage_class')) - - def _create_storage_class(self, volname_prefix=False): - # create secret - self.secret_name = oc_create_secret( - self.node, - namespace=self.sc.get('secretnamespace', 'default'), - data_key=self.heketi_cli_key, - secret_type=self.sc.get('provisioner', 'kubernetes.io/glusterfs')) - self.addCleanup(oc_delete, self.node, 'secret', self.secret_name) - - # create storageclass - self.sc_name = oc_create_sc( - self.node, provisioner='kubernetes.io/glusterfs', - resturl=self.sc['resturl'], restuser=self.sc['restuser'], - secretnamespace=self.sc['secretnamespace'], - secretname=self.secret_name, - allow_volume_expansion=True, - **({"volumenameprefix": self.sc['volumenameprefix']} - if volname_prefix else {}) - ) - self.addCleanup(oc_delete, self.node, 'sc', self.sc_name) - - return self.sc_name @ddt.data(False, True) - def test_pv_resize_with_prefix_for_name(self, volname_prefix=False): + def test_pv_resize_with_prefix_for_name(self, + create_vol_name_prefix=False): """testcases CNS-1037 and CNS-1038 """ dir_path = "/mnt/" - self._create_storage_class(volname_prefix) - node = self.ocp_master_node[0] + node = self.ocp_client[0] # Create PVC - pvc_name = oc_create_pvc(node, self.sc_name, pvc_size=1) - self.addCleanup(wait_for_resource_absence, - node, 'pvc', pvc_name) - self.addCleanup(oc_delete, node, 'pvc', pvc_name) - verify_pvc_status_is_bound(node, pvc_name) + self.create_storage_class( + allow_volume_expansion=True, + create_vol_name_prefix=create_vol_name_prefix) + pvc_name = self.create_and_wait_for_pvc() # Create DC with POD and attached PVC to it. 
dc_name = oc_create_app_dc_with_io(node, pvc_name) @@ -94,7 +64,7 @@ class TestPvResizeClass(CnsBaseClass): pod_name = get_pod_name_from_dc(node, dc_name) wait_for_pod_be_ready(node, pod_name) - if volname_prefix: + if create_vol_name_prefix: ret = heketi_ops.verify_volume_name_prefix( node, self.sc['volumenameprefix'], self.sc['secretnamespace'], @@ -172,11 +142,8 @@ class TestPvResizeClass(CnsBaseClass): available_size_gb = int(min(nodes.values()) / (1024**2)) # Create PVC - self._create_storage_class() - pvc_name = oc_create_pvc(self.node, self.sc_name, pvc_size=pvc_size_gb) - self.addCleanup(wait_for_resource_absence, self.node, 'pvc', pvc_name) - self.addCleanup(oc_delete, self.node, 'pvc', pvc_name) - verify_pvc_status_is_bound(self.node, pvc_name) + self.create_storage_class(allow_volume_expansion=True) + pvc_name = self.create_and_wait_for_pvc(pvc_size=pvc_size_gb) # Create DC with POD and attached PVC to it dc_name = oc_create_app_dc_with_io(self.node, pvc_name) @@ -234,16 +201,12 @@ class TestPvResizeClass(CnsBaseClass): def test_pv_resize_try_shrink_pv_size(self): """testcase CNS-1039 """ dir_path = "/mnt/" - self._create_storage_class() node = self.ocp_master_node[0] - pv_size = 5 # Create PVC - pvc_name = oc_create_pvc(node, self.sc_name, pvc_size=pv_size) - self.addCleanup(wait_for_resource_absence, - node, 'pvc', pvc_name) - self.addCleanup(oc_delete, node, 'pvc', pvc_name) - verify_pvc_status_is_bound(node, pvc_name) + pv_size = 5 + self.create_storage_class(allow_volume_expansion=True) + pvc_name = self.create_and_wait_for_pvc(pvc_size=pv_size) # Create DC with POD and attached PVC to it. dc_name = oc_create_app_dc_with_io(node, pvc_name) -- cgit
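
The consolidated helpers that the tests above now call -- create_storage_class(), create_and_wait_for_pvc()/create_and_wait_for_pvcs() and create_dc_with_pvc() -- live in the shared cnslibs base classes and are not part of this diff. Below is a minimal sketch of what they might look like, reconstructed from the duplicated code removed above; the class name, the default name prefixes, the omitted set_hacount/sc_name_prefix/gluster-block and 'Retain' reclaim-policy branches, and the assumption that self.node, self.sc and self.heketi_cli_key are prepared in the base setUp are all guesses here, not part of the actual change.

    # Sketch of the shared base-class helpers (reconstructed, not part of this patch).
    from cnslibs.common.openshift_ops import (
        get_pod_name_from_dc, oc_create_app_dc_with_io, oc_create_pvc,
        oc_create_sc, oc_create_secret, oc_delete,
        scale_dc_pod_amount_and_wait, verify_pvc_status_is_bound,
        wait_for_pod_be_ready, wait_for_resource_absence)


    class CnsBaseClass(object):  # illustrative; the real class derives from the project's base test class

        def create_storage_class(self, create_vol_name_prefix=False,
                                 allow_volume_expansion=False,
                                 reclaim_policy='Delete',
                                 is_arbiter_vol=False,
                                 arbiter_avg_file_size=None):
            # Create the heketi secret first, then a storage class that uses it.
            # 'self.sc' is the storage class section of the test config file.
            self.secret_name = oc_create_secret(
                self.node,
                namespace=self.sc.get('secretnamespace', 'default'),
                data_key=self.heketi_cli_key,
                secret_type=self.sc.get('provisioner', 'kubernetes.io/glusterfs'))
            self.addCleanup(oc_delete, self.node, 'secret', self.secret_name)

            kwargs = {
                'resturl': self.sc['resturl'],
                'restuser': self.sc['restuser'],
                'secretnamespace': self.sc['secretnamespace'],
                'secretname': self.secret_name,
            }
            if create_vol_name_prefix:
                kwargs['volumenameprefix'] = self.sc.get(
                    'volumenameprefix', 'autotest')
            if is_arbiter_vol:
                vol_options = 'user.heketi.arbiter true'
                if arbiter_avg_file_size:
                    vol_options += (',user.heketi.average-file-size %s'
                                    % arbiter_avg_file_size)
                kwargs['volumeoptions'] = vol_options

            self.sc_name = oc_create_sc(
                self.node, reclaim_policy=reclaim_policy,
                allow_volume_expansion=allow_volume_expansion, **kwargs)
            self.addCleanup(oc_delete, self.node, 'sc', self.sc_name)
            return self.sc_name

        def create_and_wait_for_pvcs(self, pvc_size=1,
                                     pvc_name_prefix='autotests-pvc',
                                     pvc_amount=1, sc_name=None):
            # Create 'pvc_amount' PVCs against the given (or most recently
            # created) storage class and wait for all of them to become Bound.
            # The real helper also registers PV/heketi-volume cleanups when the
            # storage class uses the 'Retain' reclaim policy.
            sc_name = sc_name or self.sc_name
            pvc_names = []
            for _ in range(pvc_amount):
                pvc_name = oc_create_pvc(
                    self.node, sc_name, pvc_name_prefix=pvc_name_prefix,
                    pvc_size=pvc_size)
                pvc_names.append(pvc_name)
                self.addCleanup(
                    wait_for_resource_absence, self.node, 'pvc', pvc_name)
                self.addCleanup(oc_delete, self.node, 'pvc', pvc_name,
                                raise_on_absence=False)
            for pvc_name in pvc_names:
                verify_pvc_status_is_bound(self.node, pvc_name)
            return pvc_names

        def create_and_wait_for_pvc(self, pvc_size=1,
                                    pvc_name_prefix='autotests-pvc',
                                    sc_name=None):
            # Single-PVC convenience wrapper; also stores the name on the test.
            self.pvc_name = self.create_and_wait_for_pvcs(
                pvc_size=pvc_size, pvc_name_prefix=pvc_name_prefix,
                sc_name=sc_name)[0]
            return self.pvc_name

        def create_dc_with_pvc(self, pvc_name, timeout=300, wait_step=10):
            # Create a DC whose app POD writes I/O to the attached PVC, wait for
            # the POD to be ready; cleanups scale the DC to 0 and delete it.
            dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
            self.addCleanup(oc_delete, self.node, 'dc', dc_name)
            self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
            pod_name = get_pod_name_from_dc(self.node, dc_name)
            wait_for_pod_be_ready(
                self.node, pod_name, timeout=timeout, wait_step=wait_step)
            return dc_name, pod_name

Registering teardown with addCleanup() immediately after each create keeps the secret, SC, PVC and DC cleaned up even when a test fails part-way through, which is why the call sites in the patch no longer carry any of this logic themselves.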