-rw-r--r--  openshift-storage-libs/openshiftstoragelibs/baseclass.py             49
-rw-r--r--  openshift-storage-libs/openshiftstoragelibs/openshift_ops.py         71
-rw-r--r--  tests/functional/gluster_stability/test_gluster_block_stability.py   35
3 files changed, 105 insertions(+), 50 deletions(-)
diff --git a/openshift-storage-libs/openshiftstoragelibs/baseclass.py b/openshift-storage-libs/openshiftstoragelibs/baseclass.py
index 60875a2c..c7ab666a 100644
--- a/openshift-storage-libs/openshiftstoragelibs/baseclass.py
+++ b/openshift-storage-libs/openshiftstoragelibs/baseclass.py
@@ -24,10 +24,9 @@ from openshiftstoragelibs.openshift_ops import (
oc_delete,
oc_get_custom_resource,
oc_get_pods,
- scale_dc_pod_amount_and_wait,
+ scale_dcs_pod_amount_and_wait,
switch_oc_project,
verify_pvc_status_is_bound,
- wait_for_pod_be_ready,
wait_for_resource_absence,
)
from openshiftstoragelibs.openshift_storage_libs import (
@@ -264,15 +263,45 @@ class BaseClass(unittest.TestCase):
)[0]
return self.pvc_name
- def create_dc_with_pvc(self, pvc_name, timeout=300, wait_step=10):
- dc_name = oc_create_app_dc_with_io(self.ocp_client[0], pvc_name)
- self.addCleanup(oc_delete, self.ocp_client[0], 'dc', dc_name)
+ def create_dcs_with_pvc(self, pvc_names, timeout=600, wait_step=5):
+ """Create bunch of DCs with app PODs which use unique PVCs.
+
+ Args:
+            pvc_names (str/set/list/tuple): One or more PVC names to
+                attach to app PODs as part of DCs.
+ timeout (int): timeout value, default value is 600 seconds.
+            wait_step (int): wait step, default value is 5 seconds.
+        Returns: dictionary with the following structure:
+ {
+ "pvc_name_1": ("dc_name_1", "pod_name_1"),
+ "pvc_name_2": ("dc_name_2", "pod_name_2"),
+ ...
+ "pvc_name_n": ("dc_name_n", "pod_name_n"),
+ }
+ """
+ pvc_names = (
+ pvc_names
+ if isinstance(pvc_names, (list, set, tuple)) else [pvc_names])
+ dc_and_pod_names, dc_names = {}, {}
+ for pvc_name in pvc_names:
+ dc_name = oc_create_app_dc_with_io(self.ocp_client[0], pvc_name)
+ dc_names[pvc_name] = dc_name
+ self.addCleanup(oc_delete, self.ocp_client[0], 'dc', dc_name)
self.addCleanup(
- scale_dc_pod_amount_and_wait, self.ocp_client[0], dc_name, 0)
- pod_name = get_pod_name_from_dc(self.ocp_client[0], dc_name)
- wait_for_pod_be_ready(self.ocp_client[0], pod_name,
- timeout=timeout, wait_step=wait_step)
- return dc_name, pod_name
+ scale_dcs_pod_amount_and_wait, self.ocp_client[0],
+ dc_names.values(), 0, timeout=timeout, wait_step=wait_step)
+
+ for pvc_name, dc_name in dc_names.items():
+ pod_name = get_pod_name_from_dc(self.ocp_client[0], dc_name)
+ dc_and_pod_names[pvc_name] = (dc_name, pod_name)
+ scale_dcs_pod_amount_and_wait(
+ self.ocp_client[0], dc_names.values(), 1,
+ timeout=timeout, wait_step=wait_step)
+
+ return dc_and_pod_names
+
+ def create_dc_with_pvc(self, pvc_name, timeout=300, wait_step=10):
+ return self.create_dcs_with_pvc(pvc_name, timeout, wait_step)[pvc_name]
def is_containerized_gluster(self):
cmd = ("oc get pods --no-headers -l glusterfs-node=pod "
diff --git a/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py b/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py
index d3cd1724..8a94e10b 100644
--- a/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py
+++ b/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py
@@ -16,6 +16,7 @@ from glusto.core import Glusto as g
from glustolibs.gluster import volume_ops
import mock
import six
+import time
import yaml
from openshiftstoragelibs import command
@@ -604,31 +605,75 @@ def wait_for_resource_absence(ocp_node, rtype, name,
raise exceptions.ExecutionError(error_msg)
-def scale_dc_pod_amount_and_wait(hostname, dc_name,
- pod_amount=1, namespace=None):
- """Scale amount of PODs for a DC.
+def scale_dcs_pod_amount_and_wait(hostname, dc_names, pod_amount=1,
+ namespace=None, timeout=600, wait_step=5):
+ """Scale amount of PODs for a list of DCs.
If pod_amount is 0, then wait for it's absence.
If pod_amount => 1, then wait for all of a DC PODs to be ready.
Args:
hostname (str): Node on which the ocp command will run
- dc_name (str): Name of heketi dc
+        dc_names (str/list/set/tuple): One or more DC names to be scaled.
pod_amount (int): Number of PODs to scale. Default is 1.
namespace (str): Namespace of a DC.
+ timeout (int): timeout value, default value is 600 seconds.
+        wait_step (int): wait step, default value is 5 seconds.
+    Returns: dictionary with the following structure:
+ {
+ "dc_name_1": ["pod_name_1_1", "pod_name_1_2", ..., "pod_name_1_n"],
+ "dc_name_2": ["pod_name_2_1", "pod_name_2_2", ..., "pod_name_2_n"],
+ ...
+ "dc_name_n": ["pod_name_n_1", "pod_name_n_2", ..., "pod_name_n_n"],
+ }
"""
+    dc_names = (
+        [dc_names] if isinstance(dc_names, six.string_types) else dc_names)
+ dc_and_pod_names = {}
namespace_arg = "--namespace=%s" % namespace if namespace else ""
- scale_cmd = "oc scale --replicas=%d dc/%s %s" % (
- pod_amount, dc_name, namespace_arg)
+ scale_cmd = "oc scale %s --replicas=%d dc/%s" % (
+ namespace_arg, pod_amount, " dc/".join(dc_names))
+
command.cmd_run(scale_cmd, hostname=hostname)
- pod_names = get_pod_names_from_dc(hostname, dc_name)
- for pod_name in pod_names:
- if pod_amount == 0:
- wait_for_resource_absence(hostname, 'pod', pod_name)
- else:
- wait_for_pod_be_ready(hostname, pod_name)
- return pod_names
+ _start_time = time.time()
+ for dc_name in dc_names:
+ dc_and_pod_names[dc_name] = get_pod_names_from_dc(hostname, dc_name)
+ for pod_name in dc_and_pod_names[dc_name]:
+ if pod_amount == 0:
+ wait_for_resource_absence(
+ hostname, 'pod', pod_name,
+ interval=wait_step, timeout=timeout)
+ else:
+ wait_for_pod_be_ready(
+ hostname, pod_name,
+ timeout=timeout, wait_step=wait_step)
+ time_diff = time.time() - _start_time
+ if time_diff > timeout:
+ timeout = wait_step
+ else:
+ timeout -= time_diff
+ return dc_and_pod_names
+
+
+def scale_dc_pod_amount_and_wait(hostname, dc_name, pod_amount=1,
+ namespace=None, timeout=600, wait_step=5):
+ """Scale amount of PODs for a DC.
+
+ If pod_amount is 0, then wait for it's absence.
+ If pod_amount => 1, then wait for all of a DC PODs to be ready.
+
+ Args:
+ hostname (str): Node on which the ocp command will run
+        dc_name (str): Name of the DC to be scaled.
+ pod_amount (int): Number of PODs to scale. Default is 1.
+ namespace (str): Namespace of a DC.
+ timeout (int): timeout value, default value is 600 seconds.
+        wait_step (int): wait step, default value is 5 seconds.
+ Returns: List of POD names of a DC.
+ """
+ return scale_dcs_pod_amount_and_wait(
+ hostname, dc_name, pod_amount, namespace=namespace,
+ timeout=timeout, wait_step=wait_step)[dc_name]
def get_gluster_host_ips_by_pvc_name(ocp_node, pvc_name):
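An illustrative sketch of calling the bulk scaling helper directly, not part of the patch; the node address and DC names are placeholders.

from openshiftstoragelibs.openshift_ops import scale_dcs_pod_amount_and_wait

ocp_node = "ocp-master.example.com"  # hypothetical node that can run "oc"
dc_names = ["dc-app-1", "dc-app-2"]  # hypothetical DC names

# Scale each DC to one replica and wait for its PODs to become ready;
# the helper returns {dc_name: [pod_name, ...], ...}.
dc_to_pods = scale_dcs_pod_amount_and_wait(
    ocp_node, dc_names, pod_amount=1, timeout=600, wait_step=5)

# Scale the same DCs back down to zero and wait for the PODs to disappear.
scale_dcs_pod_amount_and_wait(ocp_node, dc_names, pod_amount=0)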
diff --git a/tests/functional/gluster_stability/test_gluster_block_stability.py b/tests/functional/gluster_stability/test_gluster_block_stability.py
index d1b69fe3..7c51506e 100644
--- a/tests/functional/gluster_stability/test_gluster_block_stability.py
+++ b/tests/functional/gluster_stability/test_gluster_block_stability.py
@@ -16,12 +16,11 @@ from openshiftstoragelibs.openshift_ops import (
get_pod_name_from_dc,
get_pv_name_from_pvc,
oc_adm_manage_node,
- oc_create_app_dc_with_io,
oc_delete,
oc_get_custom_resource,
oc_get_schedulable_nodes,
oc_rsh,
- scale_dc_pod_amount_and_wait,
+ scale_dcs_pod_amount_and_wait,
wait_for_pod_be_ready,
wait_for_resource_absence,
wait_for_service_status_on_gluster_pod_or_node,
@@ -383,28 +382,14 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
self.addCleanup(
oc_adm_manage_node, self.node, '--schedulable=true', nodes=o_nodes)
- # Create 10 PVC's
+    # Create 10 PVCs and 10 DCs
pvcs = self.create_and_wait_for_pvcs(pvc_amount=10)
-
- pvc_and_dc = {}
- dc_names = ''
- # Create DC's
- for pvc in pvcs:
- dc_name = oc_create_app_dc_with_io(self.node, pvc)
- self.addCleanup(oc_delete, self.node, 'dc', dc_name)
- self.addCleanup(
- scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
- pvc_and_dc[pvc] = {'dc_name': dc_name}
- dc_names += ' ' + dc_name
-
- # Delete all pods before waiting for absence in cleanup to speedup
- cmd_scale = "oc scale dc --replicas=0"
- self.addCleanup(cmd_run, (cmd_scale + dc_names), self.node)
+ dcs = self.create_dcs_with_pvc(pvcs)
# Wait for app pods and verify block sessions
+ pvc_and_dc = {}
for pvc in pvcs:
- dc_name = pvc_and_dc[pvc]['dc_name']
- pod_name = get_pod_name_from_dc(self.node, dc_name)
+ dc_name, pod_name = dcs[pvc]
wait_for_pod_be_ready(self.node, pod_name, wait_step=10)
iqn, _, p_node = self.verify_iscsi_sessions_and_multipath(
@@ -417,13 +402,9 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
}
# Delete 5 pods permanently
- dc_names = " ".join([pvc_and_dc[pvc]['dc_name'] for pvc in pvcs[:5]])
- cmd_run((cmd_scale + ' ' + dc_names), self.node)
-
- # Wait for pods to be delete, for permanently deleted pods
- for pvc in pvcs[:5]:
- wait_for_resource_absence(
- self.node, 'pod', pvc_and_dc[pvc]['pod_name'])
+ scale_dcs_pod_amount_and_wait(
+ self.node, [pvc_and_dc[pvc]['dc_name'] for pvc in pvcs[:5]],
+ pod_amount=0)
# Wait for logout, for permanently deleted pods
temp_pvcs = pvcs[:5]