From a801a3f5ab3761af6ba366a5e2f9cd19067f6794 Mon Sep 17 00:00:00 2001 From: Valerii Ponomarov Date: Mon, 2 Sep 2019 20:20:45 +0530 Subject: Add functionality to create more than 1 DC in parallel We have test cases which create more than 1 app DC at once. So, add functionality to be able to create bunch of DCs in parallel and reuse it in one of test cases. Change-Id: Id606d02c31a919bbc6d49d59714dd5628c6a835d --- .../openshiftstoragelibs/baseclass.py | 49 ++++++++++++--- .../openshiftstoragelibs/openshift_ops.py | 71 ++++++++++++++++++---- 2 files changed, 97 insertions(+), 23 deletions(-) (limited to 'openshift-storage-libs') diff --git a/openshift-storage-libs/openshiftstoragelibs/baseclass.py b/openshift-storage-libs/openshiftstoragelibs/baseclass.py index 60875a2c..c7ab666a 100644 --- a/openshift-storage-libs/openshiftstoragelibs/baseclass.py +++ b/openshift-storage-libs/openshiftstoragelibs/baseclass.py @@ -24,10 +24,9 @@ from openshiftstoragelibs.openshift_ops import ( oc_delete, oc_get_custom_resource, oc_get_pods, - scale_dc_pod_amount_and_wait, + scale_dcs_pod_amount_and_wait, switch_oc_project, verify_pvc_status_is_bound, - wait_for_pod_be_ready, wait_for_resource_absence, ) from openshiftstoragelibs.openshift_storage_libs import ( @@ -264,15 +263,45 @@ class BaseClass(unittest.TestCase): )[0] return self.pvc_name - def create_dc_with_pvc(self, pvc_name, timeout=300, wait_step=10): - dc_name = oc_create_app_dc_with_io(self.ocp_client[0], pvc_name) - self.addCleanup(oc_delete, self.ocp_client[0], 'dc', dc_name) + def create_dcs_with_pvc(self, pvc_names, timeout=600, wait_step=5): + """Create a bunch of DCs with app PODs which use unique PVCs. + + Args: + pvc_names (str/set/list/tuple): List/set/tuple of PVC names + to attach to app PODs as part of DCs. + timeout (int): timeout value, default value is 600 seconds. + wait_step (int): wait step, default value is 5 seconds. 
+ Returns: dictionary with following structure: + { + "pvc_name_1": ("dc_name_1", "pod_name_1"), + "pvc_name_2": ("dc_name_2", "pod_name_2"), + ... + "pvc_name_n": ("dc_name_n", "pod_name_n"), + } + """ + pvc_names = ( + pvc_names + if isinstance(pvc_names, (list, set, tuple)) else [pvc_names]) + dc_and_pod_names, dc_names = {}, {} + for pvc_name in pvc_names: + dc_name = oc_create_app_dc_with_io(self.ocp_client[0], pvc_name) + dc_names[pvc_name] = dc_name + self.addCleanup(oc_delete, self.ocp_client[0], 'dc', dc_name) self.addCleanup( - scale_dc_pod_amount_and_wait, self.ocp_client[0], dc_name, 0) - pod_name = get_pod_name_from_dc(self.ocp_client[0], dc_name) - wait_for_pod_be_ready(self.ocp_client[0], pod_name, - timeout=timeout, wait_step=wait_step) - return dc_name, pod_name + scale_dcs_pod_amount_and_wait, self.ocp_client[0], + dc_names.values(), 0, timeout=timeout, wait_step=wait_step) + + for pvc_name, dc_name in dc_names.items(): + pod_name = get_pod_name_from_dc(self.ocp_client[0], dc_name) + dc_and_pod_names[pvc_name] = (dc_name, pod_name) + scale_dcs_pod_amount_and_wait( + self.ocp_client[0], dc_names.values(), 1, + timeout=timeout, wait_step=wait_step) + + return dc_and_pod_names + + def create_dc_with_pvc(self, pvc_name, timeout=300, wait_step=10): + return self.create_dcs_with_pvc(pvc_name, timeout, wait_step)[pvc_name] def is_containerized_gluster(self): cmd = ("oc get pods --no-headers -l glusterfs-node=pod " diff --git a/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py b/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py index d3cd1724..8a94e10b 100644 --- a/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py +++ b/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py @@ -16,6 +16,7 @@ from glusto.core import Glusto as g from glustolibs.gluster import volume_ops import mock import six +import time import yaml from openshiftstoragelibs import command @@ -604,31 +605,75 @@ def wait_for_resource_absence(ocp_node, 
rtype, name, raise exceptions.ExecutionError(error_msg) -def scale_dc_pod_amount_and_wait(hostname, dc_name, - pod_amount=1, namespace=None): - """Scale amount of PODs for a DC. +def scale_dcs_pod_amount_and_wait(hostname, dc_names, pod_amount=1, + namespace=None, timeout=600, wait_step=5): + """Scale amount of PODs for a list of DCs. If pod_amount is 0, then wait for it's absence. If pod_amount => 1, then wait for all of a DC PODs to be ready. Args: hostname (str): Node on which the ocp command will run - dc_name (str): Name of heketi dc + dc_names (str/list/set/tuple): one or more DC names to be scaled. pod_amount (int): Number of PODs to scale. Default is 1. namespace (str): Namespace of a DC. + timeout (int): timeout value, default value is 600 seconds. + wait_step (int): wait step, default value is 5 seconds. + Returns: dictionary with following structure: + { + "dc_name_1": ["pod_name_1_1", "pod_name_1_2", ..., "pod_name_1_n"], + "dc_name_2": ["pod_name_2_1", "pod_name_2_2", ..., "pod_name_2_n"], + ... 
+ "dc_name_n": ["pod_name_n_1", "pod_name_n_2", ..., "pod_name_n_n"], + } """ + dc_names = dc_names if hasattr(dc_names, '__iter__') else [dc_names] + dc_and_pod_names = {} namespace_arg = "--namespace=%s" % namespace if namespace else "" - scale_cmd = "oc scale --replicas=%d dc/%s %s" % ( - pod_amount, dc_name, namespace_arg) + scale_cmd = "oc scale %s --replicas=%d dc/%s" % ( + namespace_arg, pod_amount, " dc/".join(dc_names)) + command.cmd_run(scale_cmd, hostname=hostname) - pod_names = get_pod_names_from_dc(hostname, dc_name) - for pod_name in pod_names: - if pod_amount == 0: - wait_for_resource_absence(hostname, 'pod', pod_name) - else: - wait_for_pod_be_ready(hostname, pod_name) - return pod_names + _start_time = time.time() + for dc_name in dc_names: + dc_and_pod_names[dc_name] = get_pod_names_from_dc(hostname, dc_name) + for pod_name in dc_and_pod_names[dc_name]: + if pod_amount == 0: + wait_for_resource_absence( + hostname, 'pod', pod_name, + interval=wait_step, timeout=timeout) + else: + wait_for_pod_be_ready( + hostname, pod_name, + timeout=timeout, wait_step=wait_step) + time_diff = time.time() - _start_time + if time_diff > timeout: + timeout = wait_step + else: + timeout -= time_diff + return dc_and_pod_names + + +def scale_dc_pod_amount_and_wait(hostname, dc_name, pod_amount=1, + namespace=None, timeout=600, wait_step=5): + """Scale amount of PODs for a DC. + + If pod_amount is 0, then wait for it's absence. + If pod_amount => 1, then wait for all of a DC PODs to be ready. + + Args: + hostname (str): Node on which the ocp command will run + dc_name (str): Name of heketi dc + pod_amount (int): Number of PODs to scale. Default is 1. + namespace (str): Namespace of a DC. + timeout (int): timeout value, default value is 600 seconds. + wait_step( int): wait step, default value is 5 seconds. + Returns: List of POD names of a DC. 
+ """ + return scale_dcs_pod_amount_and_wait( + hostname, dc_name, pod_amount, namespace=namespace, + timeout=timeout, wait_step=wait_step)[dc_name] def get_gluster_host_ips_by_pvc_name(ocp_node, pvc_name): -- cgit