diff options
author | vamahaja <vamahaja@redhat.com> | 2019-09-03 18:40:23 +0530 |
---|---|---|
committer | vamahaja <vamahaja@redhat.com> | 2019-09-13 17:28:10 +0530 |
commit | c5c02f6402ebe010e8db71eda738857d73f7e83d (patch) | |
tree | ec11367a5a2d2282ba9e1fb1b48fe2d56562fba4 | |
parent | 610c21c8a73329a13167f47bf6de0cf66e57cd05 (diff) |
Fix test cases which fail to delete the volume after test completion
In test cases where we use a large disk size, heketi takes time to create
the volume and, due to the timeout, an error is reported.
Add a fix to such test cases: after getting an exception, check whether
the volume was actually created; fetch the details of such volumes, or
raise an exception in case volume creation really failed.
Change-Id: I1c23a8c6558c23edf8947771e4f41a4bd3ffd66a
Signed-off-by: vamahaja <vamahaja@redhat.com>
5 files changed, 77 insertions, 55 deletions
diff --git a/openshift-storage-libs/openshiftstoragelibs/baseclass.py b/openshift-storage-libs/openshiftstoragelibs/baseclass.py index 554863ee..ae433872 100644 --- a/openshift-storage-libs/openshiftstoragelibs/baseclass.py +++ b/openshift-storage-libs/openshiftstoragelibs/baseclass.py @@ -1,7 +1,9 @@ import datetime +import re import unittest from glusto.core import Glusto as g +import six from openshiftstoragelibs import command from openshiftstoragelibs.exceptions import ( @@ -15,7 +17,10 @@ from openshiftstoragelibs.heketi_ops import ( hello_heketi, heketi_blockvolume_delete, heketi_blockvolume_info, + heketi_volume_create, heketi_volume_delete, + heketi_volume_info, + heketi_volume_list, ) from openshiftstoragelibs.openshift_ops import ( get_pod_name_from_dc, @@ -41,6 +46,8 @@ from openshiftstoragelibs.openshift_storage_libs import ( from openshiftstoragelibs.openshift_version import get_openshift_version from openshiftstoragelibs.waiter import Waiter +HEKETI_VOLUME_REGEX = "Id:(.*).Cluster:(.*).Name:%s" + class BaseClass(unittest.TestCase): """Base class for test classes.""" @@ -308,6 +315,41 @@ class BaseClass(unittest.TestCase): def create_dc_with_pvc(self, pvc_name, timeout=300, wait_step=10): return self.create_dcs_with_pvc(pvc_name, timeout, wait_step)[pvc_name] + def create_heketi_volume_with_name_and_wait( + self, name, size, timeout=600, wait_step=10, **kwargs): + json = kwargs.get("json", False) + + try: + h_volume_info = heketi_volume_create( + self.heketi_client_node, self.heketi_server_url, + size, name=name, **kwargs) + except Exception as e: + if ('more required' in six.text_type(e) + or ('Failed to allocate new volume' in six.text_type(e))): + raise + + for w in Waiter(timeout, wait_step): + h_volumes = heketi_volume_list( + self.heketi_client_node, self.heketi_server_url) + h_volume_match = re.search( + HEKETI_VOLUME_REGEX % name, h_volumes) + if h_volume_match: + h_volume_info = heketi_volume_info( + self.heketi_client_node, 
self.heketi_server_url, + h_volume_match.group(1), json=json) + break + + if w.expired: + g.log.info( + "Heketi volume with name %s not created in 600 sec" % name) + raise + + self.addCleanup( + heketi_volume_delete, self.heketi_client_node, + self.heketi_server_url, h_volume_info["id"]) + + return h_volume_info + def is_containerized_gluster(self): cmd = ("oc get pods --no-headers -l glusterfs-node=pod " "-o=custom-columns=:.spec.nodeName") diff --git a/tests/functional/arbiter/test_arbiter.py b/tests/functional/arbiter/test_arbiter.py index 6d5ab44e..be0ab39d 100644 --- a/tests/functional/arbiter/test_arbiter.py +++ b/tests/functional/arbiter/test_arbiter.py @@ -515,27 +515,10 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass): self.create_storage_class(is_arbiter_vol=True) # Create and delete 3 small volumes concurrently - pvc_names = [] - for i in range(3): - pvc_name = openshift_ops.oc_create_pvc( - self.node, self.sc_name, pvc_name_prefix='arbiter-pvc', - pvc_size=int(pvc_size / 3)) - pvc_names.append(pvc_name) - exception_exists = False - for pvc_name in pvc_names: - try: - openshift_ops.verify_pvc_status_is_bound(self.node, pvc_name) - except Exception: - for pvc_name in pvc_names: - self.addCleanup( - openshift_ops.wait_for_resource_absence, - self.node, 'pvc', pvc_name) - for pvc_name in pvc_names: - self.addCleanup( - openshift_ops.oc_delete, self.node, 'pvc', pvc_name) - exception_exists = True - if exception_exists: - raise + pvc_names = self.create_and_wait_for_pvcs( + pvc_size=int(pvc_size / 3), pvc_name_prefix='arbiter-pvc', + pvc_amount=3, sc_name=self.sc_name) + for pvc_name in pvc_names: openshift_ops.oc_delete(self.node, 'pvc', pvc_name) for pvc_name in pvc_names: @@ -547,7 +530,8 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass): self.node, self.sc_name, pvc_name_prefix='arbiter-pvc', pvc_size=pvc_size) try: - openshift_ops.verify_pvc_status_is_bound(self.node, pvc_name) + openshift_ops.verify_pvc_status_is_bound( 
+ self.node, pvc_name, 300, 10) except Exception: self.addCleanup( openshift_ops.wait_for_resource_absence, diff --git a/tests/functional/heketi/test_block_volumes_heketi.py b/tests/functional/heketi/test_block_volumes_heketi.py index 5520e8bc..b6ff9ee0 100644 --- a/tests/functional/heketi/test_block_volumes_heketi.py +++ b/tests/functional/heketi/test_block_volumes_heketi.py @@ -114,16 +114,13 @@ class TestBlockVolumeOps(BaseClass): self.skipTest("Skipping the test since free_space_available %s" "is less than the default_bhv_size %s" % (free_space_available, default_bhv_size)) - block_host_create_info = heketi_volume_create( - self.heketi_client_node, self.heketi_server_url, - default_bhv_size, json=True, block=True) + h_volume_name = ( + "autotests-heketi-volume-%s" % utils.get_random_str()) + block_host_create_info = self.create_heketi_volume_with_name_and_wait( + h_volume_name, default_bhv_size, json=True, block=True) + block_vol_size = block_host_create_info["blockinfo"]["freesize"] block_hosting_vol_id = block_host_create_info["id"] - self.addCleanup(heketi_volume_delete, - self.heketi_client_node, - self.heketi_server_url, - block_hosting_vol_id, - raise_on_error=True) block_vol_info = {"blockhostingvolume": "init_value"} while (block_vol_info['blockhostingvolume'] != block_hosting_vol_id): block_vol = heketi_blockvolume_create( diff --git a/tests/functional/heketi/test_create_distributed_replica_heketi_volume.py b/tests/functional/heketi/test_create_distributed_replica_heketi_volume.py index e19502c2..82fcf704 100644 --- a/tests/functional/heketi/test_create_distributed_replica_heketi_volume.py +++ b/tests/functional/heketi/test_create_distributed_replica_heketi_volume.py @@ -13,11 +13,11 @@ from openshiftstoragelibs.heketi_ops import ( heketi_node_enable, heketi_node_info, heketi_node_list, - heketi_volume_create, heketi_volume_delete, heketi_volume_list, ) from openshiftstoragelibs import podcmd +from openshiftstoragelibs import utils @ddt.ddt @@ -100,33 
+100,27 @@ class TestHeketiVolume(BaseClass): # Create distributed vol vol_size_gb = self._get_vol_size() heketi_url = self.heketi_server_url + h_volume_name = "autotests-heketi-volume-%s" % utils.get_random_str() try: - g.log.info( - "Trying to create distributed '%s'Gb volume." % vol_size_gb) - heketi_vol = heketi_volume_create( - self.heketi_client_node, heketi_url, vol_size_gb, - json=True, block=block) + heketi_vol = self.create_heketi_volume_with_name_and_wait( + h_volume_name, vol_size_gb, json=True) except AssertionError as e: # NOTE: rare situation when we need to decrease size of a volume. # and we expect this vol to be distributed. g.log.info("Failed to create distributed '%s'Gb volume. " "Trying to create another one, smaller for 1Gb.") - if ('more required' in str(e) + if not ('more required' in str(e) and ('Insufficient suitable allocatable extents for ' 'logical volume' in str(e))): - vol_size_gb -= 1 - heketi_vol = heketi_volume_create( - self.heketi_client_node, heketi_url, vol_size_gb, - json=True, block=block) - else: raise + + vol_size_gb -= 1 + heketi_vol = self.create_heketi_volume_with_name_and_wait( + h_volume_name, vol_size_gb, json=True) g.log.info("Successfully created distributed volume.") vol_name = heketi_vol['name'] vol_id = heketi_vol["bricks"][0]["volume"] - self.addCleanup( - heketi_volume_delete, self.heketi_client_node, heketi_url, - vol_id, raise_on_error=(not validate_cleanup)) # Get gluster volume info g.log.info("Get gluster volume '%s' info" % vol_name) diff --git a/tests/functional/heketi/test_heketi_device_operations.py b/tests/functional/heketi/test_heketi_device_operations.py index e27fc0d5..2261304a 100644 --- a/tests/functional/heketi/test_heketi_device_operations.py +++ b/tests/functional/heketi/test_heketi_device_operations.py @@ -17,6 +17,7 @@ from openshiftstoragelibs.heketi_ops import ( heketi_volume_create, heketi_volume_delete, ) +from openshiftstoragelibs import utils @ddt.ddt @@ -381,19 +382,23 @@ class 
TestHeketiDeviceOperations(BaseClass): # Create volume with such size that we consume space more than # size of smaller disks + h_volume_name = "autotests-heketi-volume-%s" % utils.get_random_str() try: - heketi_vol = heketi_volume_create( - heketi_node, heketi_url, vol_size_gb, json=True) + self.create_heketi_volume_with_name_and_wait( + h_volume_name, vol_size_gb, json=True) except Exception as e: - g.log.warning( - "Got following error trying to create '%s'Gb vol: %s" % ( - vol_size_gb, e)) + # NOTE: rare situation when we need to decrease size of a volume. + g.log.info("Failed to create '%s'Gb volume. " + "Trying to create another one, smaller for 1Gb.") + + if not ('more required' in str(e) + and ('Insufficient suitable allocatable extents for ' + 'logical volume' in str(e))): + raise + vol_size_gb -= 1 - heketi_vol = heketi_volume_create( - heketi_node, heketi_url, vol_size_gb, json=True) - self.addCleanup( - heketi_volume_delete, self.heketi_client_node, - self.heketi_server_url, heketi_vol["bricks"][0]["volume"]) + self.create_heketi_volume_with_name_and_wait( + h_volume_name, vol_size_gb, json=True) # Try to 'remove' bigger Heketi disk expecting error, # because there is no space on smaller disk to relocate bricks to |