author     Valerii Ponomarov <vponomar@redhat.com>   2018-05-10 16:40:39 +0530
committer  Valerii Ponomarov <vponomar@redhat.com>   2018-05-10 16:40:39 +0530
commit     2bee3d332568b1e153b8a46c59253a750feea8e5 (patch)
tree       e8ac2a297b0c78a11605536b18ae4a4381d82d86
parent     695e5f44ab03db37248e8e920ee6071e24536d2f (diff)
Fix broken dynamic_provisioning tests
Fix 2 autotests and remove the skip decorator from them. Also rework the logic of uploading the mongodb template for the mongodb pod, so it takes a couple of seconds instead of minutes.

Change-Id: Ib2b09364ae435b9784b76c2f2581c197128f9649
-rw-r--r--  cns-libs/cnslibs/cns/cns_baseclass.py                                             |  1
-rw-r--r--  cns-libs/cnslibs/common/dynamic_provisioning.py                                   | 43
-rw-r--r--  cns-libs/cnslibs/common/openshift_ops.py                                          | 18
-rw-r--r--  tests/cns_tests_sample_config.yml                                                 |  1
-rw-r--r--  tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py | 57
-rw-r--r--  tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py       | 62
6 files changed, 121 insertions, 61 deletions
diff --git a/cns-libs/cnslibs/cns/cns_baseclass.py b/cns-libs/cnslibs/cns/cns_baseclass.py
index 6f8cb4b9..3df23fe1 100644
--- a/cns-libs/cnslibs/cns/cns_baseclass.py
+++ b/cns-libs/cnslibs/cns/cns_baseclass.py
@@ -65,6 +65,7 @@ class CnsBaseClass(unittest.TestCase):
# Initializes heketi config variables
heketi_config = g.config['cns']['heketi_config']
+ cls.heketi_dc_name = heketi_config['heketi_dc_name']
cls.heketi_service_name = heketi_config['heketi_service_name']
cls.heketi_client_node = heketi_config['heketi_client_node']
cls.heketi_server_url = heketi_config['heketi_server_url']
diff --git a/cns-libs/cnslibs/common/dynamic_provisioning.py b/cns-libs/cnslibs/common/dynamic_provisioning.py
index 7c1d0168..6285239a 100644
--- a/cns-libs/cnslibs/common/dynamic_provisioning.py
+++ b/cns-libs/cnslibs/common/dynamic_provisioning.py
@@ -1,6 +1,7 @@
from collections import OrderedDict
import json
import os
+import tempfile
from glusto.core import Glusto as g
from glustolibs.misc.misc_libs import upload_scripts
@@ -267,35 +268,39 @@ def create_mongodb_pod(hostname, pvc_name, pvc_size, sc_name):
Returns: True if successful,
False otherwise
'''
- ret = upload_scripts(hostname,
- os.path.join(TEMPLATE_DIR, "mongodb-template.json"),
- "/tmp/app-templates", "root")
- if not ret:
- g.log.error("Failed to upload mongodp template to %s" % hostname)
- return False
+ template_path = os.path.join(TEMPLATE_DIR, "mongodb-template.json")
+ with open(template_path, 'r') as template_f:
+ data = json.load(template_f, object_pairs_hook=OrderedDict)
+ data['objects'][1]['metadata']['annotations'][
+ 'volume.beta.kubernetes.io/storage-class'] = sc_name
+
+ tmp_fd, tmp_path = tempfile.mkstemp(
+ prefix='cns-automation-mongodb-pvcname-%s-' % pvc_name, suffix='.json')
+ dst_dir = '/tmp'
+ dst_path = os.path.join(dst_dir, os.path.basename(tmp_path))
+ try:
+ with os.fdopen(tmp_fd, 'w') as tmp_f:
+ json.dump(
+ data, tmp_f, sort_keys=False, indent=4, ensure_ascii=False)
+ if not upload_scripts(hostname, tmp_path, dst_dir, "root"):
+ g.log.error("Failed to upload mongodb template to %s" % hostname)
+ return False
+ finally:
+ os.remove(tmp_path)
+
try:
conn = g.rpyc_get_connection(hostname, user="root")
if conn is None:
g.log.error("Failed to get rpyc connection of node %s"
% hostname)
return False
- with conn.builtin.open(
- '/tmp/app-templates/mongodb-template.json', 'r') as data_file:
- data = json.load(data_file, object_pairs_hook=OrderedDict)
- data['objects'][1]['metadata']['annotations'][
- 'volume.beta.kubernetes.io/storage-class'] = sc_name
- with conn.builtin.open('/%s.json' % pvc_name, 'w') as data_file:
- json.dump(data, data_file, sort_keys=False,
- indent=4, ensure_ascii=False)
- cmd = ("oc new-app /%s.json --param=DATABASE_SERVICE_NAME=%s "
+ cmd = ("oc new-app %s --param=DATABASE_SERVICE_NAME=%s "
"--param=VOLUME_CAPACITY=%sGi") % (
- pvc_name, pvc_name, pvc_size)
+ dst_path, pvc_name, pvc_size)
ret, out, err = g.run(hostname, cmd, "root")
if ret != 0:
- g.log.error("failed to execute cmd %s on %s" % (
- cmd, hostname))
+ g.log.error("failed to execute cmd %s on %s" % (cmd, hostname))
return False
-
except Exception as err:
g.log.error("failed to create mongodb pod %s" % err)
return False
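
Note: the hunk above replaces the old rpyc-based remote editing of /tmp/app-templates/mongodb-template.json with a local edit-then-upload flow, which is what makes pod creation take seconds instead of minutes. As a reading aid only (not part of the patch), the added lines can be collapsed into a helper like the sketch below; the helper name and the template_dir parameter are illustrative, while upload_scripts() and the template layout come straight from the diff.

import json
import os
import tempfile
from collections import OrderedDict

from glustolibs.misc.misc_libs import upload_scripts


def render_and_upload_mongodb_template(hostname, pvc_name, sc_name,
                                        template_dir):
    """Patch the storage class into a local copy of the mongodb template
    and upload only that one rendered file to the node."""
    template_path = os.path.join(template_dir, "mongodb-template.json")
    with open(template_path, 'r') as template_f:
        data = json.load(template_f, object_pairs_hook=OrderedDict)
    # objects[1] is the PVC definition inside the template.
    data['objects'][1]['metadata']['annotations'][
        'volume.beta.kubernetes.io/storage-class'] = sc_name

    tmp_fd, tmp_path = tempfile.mkstemp(
        prefix='cns-automation-mongodb-pvcname-%s-' % pvc_name,
        suffix='.json')
    dst_path = os.path.join('/tmp', os.path.basename(tmp_path))
    try:
        with os.fdopen(tmp_fd, 'w') as tmp_f:
            json.dump(data, tmp_f, sort_keys=False, indent=4,
                      ensure_ascii=False)
        if not upload_scripts(hostname, tmp_path, '/tmp', "root"):
            return None
    finally:
        # The local temp copy is no longer needed once it is uploaded.
        os.remove(tmp_path)
    # The caller feeds dst_path to "oc new-app <dst_path> ...".
    return dst_path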
diff --git a/cns-libs/cnslibs/common/openshift_ops.py b/cns-libs/cnslibs/common/openshift_ops.py
index 3d3dd061..84edfdd6 100644
--- a/cns-libs/cnslibs/common/openshift_ops.py
+++ b/cns-libs/cnslibs/common/openshift_ops.py
@@ -6,9 +6,11 @@ Various utility functions for interacting with OCP/OpenShift.
import re
import types
+from glusto.core import Glusto as g
import yaml
-from glusto.core import Glusto as g
+from cnslibs.common import exceptions
+from cnslibs.common import waiter
PODS_WIDE_RE = re.compile(
@@ -291,3 +293,17 @@ def create_namespace(hostname, namespace):
return True
g.log.error("failed to create namespace %s" % namespace)
return False
+
+
+def wait_for_resource_absence(ocp_node, rtype, name,
+ interval=10, timeout=120):
+ for w in waiter.Waiter(timeout=timeout, interval=interval):
+ try:
+ oc_get_yaml(ocp_node, rtype, name, raise_on_error=True)
+ except AssertionError:
+ return
+ if w.expired:
+ error_msg = "%s '%s' still exists after waiting for it %d seconds" % (
+ rtype, name, timeout)
+ g.log.error(error_msg)
+ raise exceptions.ExecutionError(error_msg)
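
wait_for_resource_absence() relies on cnslibs.common.waiter, which is not included in this change. The sketch below only illustrates the polling contract the loop above assumes: yield repeatedly, sleep `interval` seconds between attempts, stop after `timeout`, and expose an `expired` flag once the loop ends. The actual Waiter in the library may be implemented differently.

import time


class Waiter(object):
    """Iterator that yields itself until the timeout is exhausted."""

    def __init__(self, timeout=60, interval=1):
        self.timeout = timeout
        self.interval = interval
        self.expired = False
        self._start = None

    def __iter__(self):
        self._start = time.time()
        return self

    def next(self):  # Python 2 iterator protocol, as used by the test libs
        if time.time() - self._start > self.timeout:
            self.expired = True
            raise StopIteration()
        time.sleep(self.interval)
        return self

    __next__ = next  # Python 3 compatibility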
diff --git a/tests/cns_tests_sample_config.yml b/tests/cns_tests_sample_config.yml
index ba06bd75..52612a96 100644
--- a/tests/cns_tests_sample_config.yml
+++ b/tests/cns_tests_sample_config.yml
@@ -72,6 +72,7 @@ cns:
trusted_storage_pool_list:
- [gluster_server1, gluster_server2]
heketi_config:
+ heketi_dc_name: "fake-name-of-heketi-deployment-config"
heketi_service_name: "fake-name-of-heketi-service"
heketi_client_node:
heketi_server_url: "http://heketi-storage-project.cloudapps.mystorage.com"
diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
index 8db0b01d..f756a759 100644
--- a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
+++ b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
@@ -5,19 +5,17 @@ from cnslibs.common.dynamic_provisioning import (
get_pvc_status,
verify_pod_status_running)
from cnslibs.cns.cns_baseclass import CnsGlusterBlockBaseClass
-from cnslibs.common.exceptions import (
- ConfigError,
- ExecutionError)
+from cnslibs.common.exceptions import ExecutionError
from cnslibs.common.heketi_ops import (
export_heketi_cli_server)
from cnslibs.common.openshift_ops import (
get_ocp_gluster_pod_names,
oc_create,
oc_delete,
- oc_rsh)
+ oc_rsh,
+ wait_for_resource_absence)
from cnslibs.common.waiter import Waiter
from glusto.core import Glusto as g
-import unittest
class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
@@ -135,7 +133,6 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
cmd, self.ocp_master_node[0]))
- @unittest.skip("skiping heketi-pod failure testcase")
def test_dynamic_provisioning_glusterblock_heketipod_failure(self):
g.log.info("test_dynamic_provisioning_glusterblock_Heketipod_Failure")
storage_class = self.cns_storage_class['storage_class2']
@@ -189,6 +186,8 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
"/%s.yaml" % secret['secret_name'])
self.addCleanup(oc_delete, self.ocp_master_node[0], 'secret',
secret['secret_name'])
+
+ # Create App pod #1 and write data to it
ret = create_mongodb_pod(self.ocp_master_node[0],
pvc_name2, 10, sc_name)
self.assertTrue(ret, "creation of mongodb pod failed")
@@ -198,8 +197,8 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
pvc_name2)
self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc',
pvc_name2)
- ret = verify_pod_status_running(self.ocp_master_node[0],
- pvc_name2)
+ ret = verify_pod_status_running(
+ self.ocp_master_node[0], pvc_name2, wait_step=5, timeout=300)
self.assertTrue(ret, "verify mongodb pod status as running failed")
cmd = ("oc get pods | grep %s | grep -v deploy "
"| awk {'print $1'}") % pvc_name2
@@ -212,9 +211,23 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
cmd, self.ocp_master_node[0]))
- oc_delete(self.ocp_master_node[0], 'dc', "heketi")
- oc_delete(self.ocp_master_node[0], 'service', "heketi")
- oc_delete(self.ocp_master_node[0], 'route', "heketi")
+
+ # Remove Heketi pod
+ heketi_down_cmd = "oc scale --replicas=0 dc/%s --namespace %s" % (
+ self.heketi_dc_name, self.cns_project_name)
+ heketi_up_cmd = "oc scale --replicas=1 dc/%s --namespace %s" % (
+ self.heketi_dc_name, self.cns_project_name)
+ self.addCleanup(g.run, self.ocp_master_node[0], heketi_up_cmd, "root")
+ ret, out, err = g.run(self.ocp_master_node[0], heketi_down_cmd, "root")
+
+ get_heketi_podname_cmd = (
+ "oc get pods --all-namespaces -o=custom-columns=:.metadata.name "
+ "--no-headers=true "
+ "--selector deploymentconfig=%s" % self.heketi_dc_name)
+ ret, out, err = g.run(self.ocp_master_node[0], get_heketi_podname_cmd)
+ wait_for_resource_absence(self.ocp_master_node[0], 'pod', out.strip())
+
+ # Create App pod #2
pvc_name3 = "mongodb3-block"
ret = create_mongodb_pod(self.ocp_master_node[0],
pvc_name3, 10, sc_name)
@@ -230,12 +243,18 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
self.assertTrue(ret, "failed to get pvc status of %s" % pvc_name3)
self.assertEqual(status, "Pending", "pvc status of "
"%s is not in Pending state" % pvc_name3)
- cmd = "oc process heketi | oc create -f -"
- ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+
+ # Bring Heketi pod back
+ ret, out, err = g.run(self.ocp_master_node[0], heketi_up_cmd, "root")
self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
- cmd, self.ocp_master_node[0]))
- ret = verify_pod_status_running(self.ocp_master_node[0], "heketi")
+ heketi_up_cmd, self.ocp_master_node[0]))
+
+ ret, out, err = g.run(self.ocp_master_node[0], get_heketi_podname_cmd)
+ ret = verify_pod_status_running(
+ self.ocp_master_node[0], out.strip(), wait_step=5, timeout=120)
self.assertTrue(ret, "verify heketi pod status as running failed")
+
+ # Verify App pod #2
cmd = ("oc get svc %s "
"-o=custom-columns=:.spec.clusterIP" % self.heketi_service_name)
ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
@@ -270,15 +289,15 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
else:
break
if w.expired:
- error_msg = ("exceeded timeout 300 sec, pvc %s not in"
- " Bound state" % pvc_name3)
+ error_msg = ("exceeded timeout 600 sec, pvc %s not in"
+ " Bound state" % pvc_name3)
g.log.error(error_msg)
raise ExecutionError(error_msg)
self.assertEqual(status, "Bound", "pvc status of %s "
"is not in Bound state, its state is %s" % (
pvc_name3, status))
- ret = verify_pod_status_running(self.ocp_master_node[0],
- pvc_name3)
+ ret = verify_pod_status_running(
+ self.ocp_master_node[0], pvc_name3, wait_step=5, timeout=300)
self.assertTrue(ret, "verify %s pod status as "
"running failed" % pvc_name3)
cmd = ("oc get pods | grep %s | grep -v deploy "
diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
index 894ba33d..c08bbf9b 100644
--- a/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
+++ b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
@@ -4,22 +4,18 @@ from cnslibs.common.dynamic_provisioning import (
create_storage_class_file,
get_pvc_status,
verify_pod_status_running)
-from cnslibs.cns.cns_baseclass import (
- CnsBaseClass,
- CnsSetupBaseClass)
-from cnslibs.common.exceptions import (
- ConfigError,
- ExecutionError)
+from cnslibs.cns.cns_baseclass import CnsBaseClass
+from cnslibs.common.exceptions import ExecutionError
from cnslibs.common.heketi_ops import (
verify_volume_name_prefix)
from cnslibs.common.openshift_ops import (
get_ocp_gluster_pod_names,
oc_create,
oc_delete,
- oc_rsh)
+ oc_rsh,
+ wait_for_resource_absence)
from cnslibs.common.waiter import Waiter
from glusto.core import Glusto as g
-import unittest
class TestDynamicProvisioningP0(CnsBaseClass):
@@ -123,7 +119,6 @@ class TestDynamicProvisioningP0(CnsBaseClass):
self.dynamic_provisioning_glusterfile(pvc_name="mongodb5",
volname_prefix=True)
- @unittest.skip("skiping heketi-pod failure testcase")
def test_dynamic_provisioning_glusterfile_heketipod_failure(self):
g.log.info("test_dynamic_provisioning_glusterfile_Heketipod_Failure")
storage_class = self.cns_storage_class['storage_class1']
@@ -162,6 +157,8 @@ class TestDynamicProvisioningP0(CnsBaseClass):
"/%s.yaml" % secret['secret_name'])
self.addCleanup(oc_delete, self.ocp_master_node[0], 'secret',
secret['secret_name'])
+
+ # Create App pod #1 and write data to it
ret = create_mongodb_pod(self.ocp_master_node[0], pvc_name2,
10, sc_name)
self.assertTrue(ret, "creation of mongodb pod failed")
@@ -171,8 +168,8 @@ class TestDynamicProvisioningP0(CnsBaseClass):
pvc_name2)
self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc',
pvc_name2)
- ret = verify_pod_status_running(self.ocp_master_node[0],
- pvc_name2)
+ ret = verify_pod_status_running(
+ self.ocp_master_node[0], pvc_name2, wait_step=5, timeout=300)
self.assertTrue(ret, "verify mongodb pod status as running failed")
cmd = ("oc get pods | grep %s | grep -v deploy "
"|awk {'print $1'}") % pvc_name2
@@ -185,9 +182,23 @@ class TestDynamicProvisioningP0(CnsBaseClass):
ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
cmd, self.ocp_master_node[0]))
- oc_delete(self.ocp_master_node[0], 'dc', "heketi")
- oc_delete(self.ocp_master_node[0], 'service', "heketi")
- oc_delete(self.ocp_master_node[0], 'route', "heketi")
+
+ # Remove Heketi pod
+ heketi_down_cmd = "oc scale --replicas=0 dc/%s --namespace %s" % (
+ self.heketi_dc_name, self.cns_project_name)
+ heketi_up_cmd = "oc scale --replicas=1 dc/%s --namespace %s" % (
+ self.heketi_dc_name, self.cns_project_name)
+ self.addCleanup(g.run, self.ocp_master_node[0], heketi_up_cmd, "root")
+ ret, out, err = g.run(self.ocp_master_node[0], heketi_down_cmd, "root")
+
+ get_heketi_podname_cmd = (
+ "oc get pods --all-namespaces -o=custom-columns=:.metadata.name "
+ "--no-headers=true "
+ "--selector deploymentconfig=%s" % self.heketi_dc_name)
+ ret, out, err = g.run(self.ocp_master_node[0], get_heketi_podname_cmd)
+ wait_for_resource_absence(self.ocp_master_node[0], 'pod', out.strip())
+
+ # Create App pod #2
pvc_name3 = "mongodb3"
ret = create_mongodb_pod(self.ocp_master_node[0],
pvc_name3, 10, sc_name)
@@ -203,12 +214,18 @@ class TestDynamicProvisioningP0(CnsBaseClass):
self.assertTrue(ret, "failed to get pvc status of %s" % pvc_name3)
self.assertEqual(status, "Pending", "pvc status of "
"%s is not in Pending state" % pvc_name3)
- cmd = "oc process heketi | oc create -f -"
- ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+
+ # Bring Heketi pod back
+ ret, out, err = g.run(self.ocp_master_node[0], heketi_up_cmd, "root")
self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
- cmd, self.ocp_master_node[0]))
- ret = verify_pod_status_running(self.ocp_master_node[0], "heketi")
+ heketi_up_cmd, self.ocp_master_node[0]))
+
+ ret, out, err = g.run(self.ocp_master_node[0], get_heketi_podname_cmd)
+ ret = verify_pod_status_running(
+ self.ocp_master_node[0], out.strip(), wait_step=5, timeout=120)
self.assertTrue(ret, "verify heketi pod status as running failed")
+
+ # Verify App pod #2
cmd = ("oc get svc %s "
"-o=custom-columns=:.spec.clusterIP" % self.heketi_service_name)
ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
@@ -243,17 +260,18 @@ class TestDynamicProvisioningP0(CnsBaseClass):
else:
break
if w.expired:
- error_msg = ("exceeded timeout 300 sec, pvc %s not in"
- " Bound state" % pvc_name3)
+ error_msg = ("exceeded timeout 600 sec, pvc %s not in"
+ " Bound state" % pvc_name3)
g.log.error(error_msg)
raise ExecutionError(error_msg)
self.assertEqual(status, "Bound", "pvc status of %s "
"is not in Bound state, its state is %s" % (
pvc_name3, status))
- ret = verify_pod_status_running(self.ocp_master_node[0],
- pvc_name3)
+ ret = verify_pod_status_running(
+ self.ocp_master_node[0], pvc_name3, wait_step=5, timeout=300)
self.assertTrue(ret, "verify %s pod status "
"as running failed" % pvc_name3)
+
cmd = ("oc get pods | grep %s | grep -v deploy "
"|awk {'print $1'}") % pvc_name3
ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")