author     Valerii Ponomarov <vponomar@redhat.com>    2018-10-20 01:26:10 +0530
committer  Valerii Ponomarov <vponomar@redhat.com>    2018-10-24 14:32:09 +0000
commit     0f9a9b152efb7b986919deada5761600eafcde47
tree       d2e75024efb93b281c2df82569b245a6214cbbe4
parent     9fe4515438397e6153f51d58fd09551d240df6d3
[CNS-1040] Resize PVC exceeding available free space
Make sure that, after a failed attempt to resize a PVC, the
application POD continues to run and we are still able to write data
to it.
Change-Id: I1dda1e4698ca4e569aa67740c2e9a5dca9a4e7bf
cns-libs/cnslibs/common/openshift_ops.py                |  68
tests/functional/common/provisioning/test_pv_resize.py  | 114
2 files changed, 161 insertions(+), 21 deletions(-)
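In plain terms, the new test exercises the flow described in the commit message: request a PVC expansion larger than what the backing devices can hold, wait for the resize failure to be reported as an event, then verify that the application pod still accepts writes. A rough sketch of that flow using the helpers this patch touches; the node, claim, and pod names below are placeholders, not values from the patch:

```python
from cnslibs.common.openshift_ops import (
    oc_rsh, resize_pvc, wait_for_events, wait_for_pod_be_ready)

# Placeholder identifiers, for illustration only.
ocp_node = "master-0.example.com"
pvc_name = "claim-under-test"
pod_name = "app-pod-1"

# Ask for more space than any of the online devices can provide.
resize_pvc(ocp_node, pvc_name, 100)

# The expansion is expected to fail; block until the failure event shows up.
wait_for_events(ocp_node, obj_name=pvc_name, event_reason='VolumeResizeFailed')

# The application pod must survive the failed resize and still accept writes.
wait_for_pod_be_ready(ocp_node, pod_name)
ret, out, err = oc_rsh(
    ocp_node, pod_name, "dd if=/dev/urandom of=/mnt/autotest bs=100K count=1")
assert ret == 0, "Failed to write data after the failed PVC expansion"
```

The full test additionally trims the Heketi topology down to three usable devices so that the maximum requestable size is known in advance; see the test_pv_resize_no_free_space hunk below.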
diff --git a/cns-libs/cnslibs/common/openshift_ops.py b/cns-libs/cnslibs/common/openshift_ops.py
index 830dc215..e86010cb 100644
--- a/cns-libs/cnslibs/common/openshift_ops.py
+++ b/cns-libs/cnslibs/common/openshift_ops.py
@@ -1188,3 +1188,71 @@ def verify_gluster_vol_for_pvc(hostname, pvc_name):
     g.log.info("verification of gluster vol %s for pvc %s is"
                "successful" % (gluster_vol, pvc_name))
     return True
+
+
+def get_events(hostname,
+               obj_name=None, obj_namespace=None, obj_type=None,
+               event_reason=None, event_type=None):
+    """Return filtered list of events.
+
+    Args:
+        hostname (str): hostname of oc client
+        obj_name (str): name of an object
+        obj_namespace (str): namespace where object is located
+        obj_type (str): type of an object, i.e. PersistentVolumeClaim or Pod
+        event_reason (str): reason why event was created,
+            i.e. Created, Started, Unhealthy, SuccessfulCreate, Scheduled ...
+        event_type (str): type of an event, i.e. Normal or Warning
+    Returns:
+        List of dictionaries, where the latter are of following structure:
+            {
+                "involvedObject": {
+                    "kind": "ReplicationController",
+                    "name": "foo-object-name",
+                    "namespace": "foo-object-namespace",
+                },
+                "message": "Created pod: foo-object-name",
+                "metadata": {
+                    "creationTimestamp": "2018-10-19T18:27:09Z",
+                    "name": "foo-object-name.155f15db4e72cc2e",
+                    "namespace": "foo-object-namespace",
+                },
+                "reason": "SuccessfulCreate",
+                "reportingComponent": "",
+                "reportingInstance": "",
+                "source": {"component": "replication-controller"},
+                "type": "Normal"
+            }
+    """
+    field_selector = []
+    if obj_name:
+        field_selector.append('involvedObject.name=%s' % obj_name)
+    if obj_namespace:
+        field_selector.append('involvedObject.namespace=%s' % obj_namespace)
+    if obj_type:
+        field_selector.append('involvedObject.kind=%s' % obj_type)
+    if event_reason:
+        field_selector.append('reason=%s' % event_reason)
+    if event_type:
+        field_selector.append('type=%s' % event_type)
+    cmd = "oc get events -o yaml --field-selector %s" % ",".join(
+        field_selector or "''")
+    return yaml.load(command.cmd_run(cmd, hostname=hostname))['items']
+
+
+def wait_for_events(hostname,
+                    obj_name=None, obj_namespace=None, obj_type=None,
+                    event_reason=None, event_type=None,
+                    timeout=120, wait_step=3):
+    """Wait for appearance of specific set of events."""
+    for w in waiter.Waiter(timeout, wait_step):
+        events = get_events(
+            hostname=hostname, obj_name=obj_name, obj_namespace=obj_namespace,
+            obj_type=obj_type, event_reason=event_reason,
+            event_type=event_type)
+        if events:
+            return events
+    if w.expired:
+        err_msg = ("Exceeded %ssec timeout waiting for events."
+                   % timeout)
+        g.log.error(err_msg)
+        raise exceptions.ExecutionError(err_msg)
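Before moving on to the test changes, here is a hedged usage sketch of the new get_events helper: the optional filters are joined into a single --field-selector expression, "oc get events -o yaml" is run on the given client node, and the parsed items list is returned. The hostname and claim name below are made up for illustration:

```python
from cnslibs.common.openshift_ops import get_events

# Hypothetical hostname and claim name, for illustration only.
events = get_events(
    "master-0.example.com",
    obj_name="claim-under-test",
    obj_type="PersistentVolumeClaim",
    event_type="Warning",
)
# Under the hood this effectively runs:
#   oc get events -o yaml --field-selector \
#       involvedObject.name=claim-under-test,\
#       involvedObject.kind=PersistentVolumeClaim,type=Warning
for event in events:
    print(event["reason"], event["message"])
```

wait_for_events simply polls get_events with the same filters until at least one event matches or the timeout expires.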
diff --git a/tests/functional/common/provisioning/test_pv_resize.py b/tests/functional/common/provisioning/test_pv_resize.py
index 1e92efe9..2ec7773a 100644
--- a/tests/functional/common/provisioning/test_pv_resize.py
+++ b/tests/functional/common/provisioning/test_pv_resize.py
@@ -1,8 +1,7 @@
 import ddt
 from cnslibs.common.cns_libs import (
     enable_pvc_resize)
-from cnslibs.common.heketi_ops import (
-    verify_volume_name_prefix)
+from cnslibs.common import heketi_ops
 from cnslibs.common.openshift_ops import (
     resize_pvc,
     get_pod_name_from_dc,
@@ -18,6 +17,7 @@ from cnslibs.common.openshift_ops import (
     verify_pv_size,
     verify_pvc_size,
     verify_pvc_status_is_bound,
+    wait_for_events,
     wait_for_pod_be_ready,
     wait_for_resource_absence)
 from cnslibs.cns.cns_baseclass import CnsBaseClass
@@ -26,24 +26,23 @@ from glusto.core import Glusto as g
 
 @ddt.ddt
 class TestPvResizeClass(CnsBaseClass):
-    '''
-    Class that contain test cases for
-    pv resize
-    '''
+    """Test cases for PV resize."""
+
     @classmethod
     def setUpClass(cls):
         super(TestPvResizeClass, cls).setUpClass()
-        version = oc_version(cls.ocp_master_node[0])
-        if any(v in version for v in ("3.6", "3.7", "3.8")):
+        cls.node = cls.ocp_master_node[0]
+        cls.version = oc_version(cls.node)
+        if any(v in cls.version for v in ("3.6", "3.7", "3.8")):
+            cls.skip_me = True
             return
-        enable_pvc_resize(cls.ocp_master_node[0])
+        enable_pvc_resize(cls.node)
 
     def setUp(self):
         super(TestPvResizeClass, self).setUp()
-        version = oc_version(self.ocp_master_node[0])
-        if any(v in version for v in ("3.6", "3.7", "3.8")):
+        if getattr(self, "skip_me", False):
             msg = ("pv resize is not available in openshift "
-                   "version %s " % version)
+                   "version %s " % self.version)
             g.log.error(msg)
             raise self.skipTest(msg)
@@ -53,16 +52,15 @@ class TestPvResizeClass(CnsBaseClass):
 
         # create secret
         self.secret_name = oc_create_secret(
-            self.ocp_master_node[0],
+            self.node,
             namespace=secret['namespace'],
             data_key=self.heketi_cli_key,
             secret_type=secret['type'])
-        self.addCleanup(
-            oc_delete, self.ocp_master_node[0], 'secret', self.secret_name)
+        self.addCleanup(oc_delete, self.node, 'secret', self.secret_name)
 
         # create storageclass
         self.sc_name = oc_create_sc(
-            self.ocp_master_node[0], provisioner='kubernetes.io/glusterfs',
+            self.node, provisioner='kubernetes.io/glusterfs',
             resturl=sc['resturl'], restuser=sc['restuser'],
             secretnamespace=sc['secretnamespace'],
             secretname=self.secret_name,
@@ -70,7 +68,7 @@ class TestPvResizeClass(CnsBaseClass):
             **({"volumenameprefix": sc['volumenameprefix']}
                if volname_prefix else {})
         )
-        self.addCleanup(oc_delete, self.ocp_master_node[0], 'sc', self.sc_name)
+        self.addCleanup(oc_delete, self.node, 'sc', self.sc_name)
 
         return self.sc_name
 
@@ -98,10 +96,10 @@ class TestPvResizeClass(CnsBaseClass):
         wait_for_pod_be_ready(node, pod_name)
         if volname_prefix:
             storage_class = self.cns_storage_class['storage_class1']
-            ret = verify_volume_name_prefix(node,
-                                            storage_class['volumenameprefix'],
-                                            storage_class['secretnamespace'],
-                                            pvc_name, self.heketi_server_url)
+            ret = heketi_ops.verify_volume_name_prefix(
+                node, storage_class['volumenameprefix'],
+                storage_class['secretnamespace'],
+                pvc_name, self.heketi_server_url)
             self.assertTrue(ret, "verify volnameprefix failed")
         cmd = ("dd if=/dev/urandom of=%sfile "
                "bs=100K count=1000") % dir_path
@@ -127,3 +125,77 @@ class TestPvResizeClass(CnsBaseClass):
         ret, out, err = oc_rsh(node, pod_name, cmd)
         self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
             cmd, node))
+
+    def test_pv_resize_no_free_space(self):
+        """Test case CNS-1040"""
+        dir_path = "/mnt"
+        pvc_size_gb = 1
+        min_free_space_gb = 3
+
+        # Get available free space disabling redundant devices and nodes
+        heketi_url = self.heketi_server_url
+        node_id_list = heketi_ops.heketi_node_list(
+            self.heketi_client_node, heketi_url)
+        self.assertTrue(node_id_list)
+        nodes = {}
+        min_free_space = min_free_space_gb * 1024**2
+        for node_id in node_id_list:
+            node_info = heketi_ops.heketi_node_info(
+                self.heketi_client_node, heketi_url, node_id, json=True)
+            if (node_info['state'].lower() != 'online' or
+                    not node_info['devices']):
+                continue
+            if len(nodes) > 2:
+                out = heketi_ops.heketi_node_disable(
+                    self.heketi_client_node, heketi_url, node_id)
+                self.assertTrue(out)
+                self.addCleanup(
+                    heketi_ops.heketi_node_enable,
+                    self.heketi_client_node, heketi_url, node_id)
+            for device in node_info['devices']:
+                if device['state'].lower() != 'online':
+                    continue
+                free_space = device['storage']['free']
+                if (node_id in nodes.keys() or free_space < min_free_space):
+                    out = heketi_ops.heketi_device_disable(
+                        self.heketi_client_node, heketi_url, device['id'])
+                    self.assertTrue(out)
+                    self.addCleanup(
+                        heketi_ops.heketi_device_enable,
+                        self.heketi_client_node, heketi_url, device['id'])
+                    continue
+                nodes[node_id] = free_space
+        if len(nodes) < 3:
+            raise self.skipTest(
+                "Could not find 3 online nodes with, "
+                "at least, 1 online device having free space "
+                "bigger than %dGb." % min_free_space_gb)
+
+        # Calculate maximum available size for PVC
+        available_size_gb = int(min(nodes.values()) / (1024**2))
+
+        # Create PVC
+        self._create_storage_class()
+        pvc_name = oc_create_pvc(self.node, self.sc_name, pvc_size=pvc_size_gb)
+        self.addCleanup(wait_for_resource_absence, self.node, 'pvc', pvc_name)
+        self.addCleanup(oc_delete, self.node, 'pvc', pvc_name)
+        verify_pvc_status_is_bound(self.node, pvc_name)
+
+        # Create DC with POD and attach the PVC to it
+        dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
+        self.addCleanup(oc_delete, self.node, 'dc', dc_name)
+        self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
+        pod_name = get_pod_name_from_dc(self.node, dc_name)
+        wait_for_pod_be_ready(self.node, pod_name)
+
+        # Try to expand existing PVC exceeding free space
+        resize_pvc(self.node, pvc_name, available_size_gb)
+        wait_for_events(
+            self.node, obj_name=pvc_name, event_reason='VolumeResizeFailed')
+
+        # Check that app POD is up and running, then try to write data
+        wait_for_pod_be_ready(self.node, pod_name)
+        cmd = "dd if=/dev/urandom of=%s/autotest bs=100K count=1" % dir_path
+        ret, out, err = oc_rsh(self.node, pod_name, cmd)
+        self.assertEqual(
+            ret, 0, "Failed to write data after failed attempt to expand PVC.")
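wait_for_events relies on glusto's waiter.Waiter for its retry loop. A minimal stand-in that behaves the way the loop in openshift_ops.py assumes (yield repeatedly until the timeout elapses, then stop with expired set) might look like the sketch below; this is an assumption about the polling pattern, not glusto's actual implementation.

```python
import time


class Waiter(object):
    """Minimal polling-loop sketch (assumed behaviour, not glusto's code).

    Iterating yields the waiter itself until ``timeout`` seconds have
    passed, sleeping ``interval`` seconds between yields.  Once the
    timeout elapses, iteration stops and ``expired`` is set to True so
    the caller can distinguish a successful early return from a timeout.
    """

    def __init__(self, timeout=120, interval=3):
        self.timeout = timeout
        self.interval = interval
        self.expired = False
        self._started = False
        self._deadline = time.time() + timeout

    def __iter__(self):
        return self

    def __next__(self):
        # Sleep only between attempts, not before the first one.
        if self._started:
            time.sleep(self.interval)
        self._started = True
        if time.time() > self._deadline:
            self.expired = True
            raise StopIteration
        return self

    next = __next__  # Python 2 compatibility, matching the test code base
```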