 cns-libs/cnslibs/common/openshift_ops.py                                    |  60
 tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py  | 482
 2 files changed, 253 insertions, 289 deletions
diff --git a/cns-libs/cnslibs/common/openshift_ops.py b/cns-libs/cnslibs/common/openshift_ops.py
index 4131ea9c..bc7511dc 100644
--- a/cns-libs/cnslibs/common/openshift_ops.py
+++ b/cns-libs/cnslibs/common/openshift_ops.py
@@ -614,6 +614,66 @@ def scale_dc_pod_amount_and_wait(hostname, dc_name,
wait_for_pod_be_ready(hostname, pod)
+def get_gluster_pod_names_by_pvc_name(ocp_node, pvc_name):
+ """Get Gluster POD names, whose nodes store bricks for specified PVC.
+
+ Args:
+ ocp_node (str): Node to execute OCP commands on.
+ pvc_name (str): Name of a PVC to get related Gluster PODs.
+ Returns:
+ list: List of dicts, which consist of following 3 key-value pairs:
+ pod_name=<pod_name_value>,
+ host_name=<host_name_value>,
+ host_ip=<host_ip_value>
+ """
+ # Get node IPs
+ pv_info = get_gluster_vol_info_by_pvc_name(ocp_node, pvc_name)
+ gluster_pod_nodes_ips = [
+ brick["name"].split(":")[0]
+ for brick in pv_info["bricks"]["brick"]
+ ]
+
+ # Get node names
+ get_node_names_cmd = (
+ "oc get node -o wide | grep -e '%s ' | awk '{print $1}'" % (
+ " ' -e '".join(gluster_pod_nodes_ips)))
+ gluster_pod_node_names = command.cmd_run(
+ get_node_names_cmd, hostname=ocp_node)
+ gluster_pod_node_names = [
+ node_name.strip()
+ for node_name in gluster_pod_node_names.split("\n")
+ if node_name.strip()
+ ]
+ node_count = len(gluster_pod_node_names)
+ err_msg = "Expected more than one node hosting Gluster PODs. Got '%s'." % (
+ node_count)
+ assert (node_count > 1), err_msg
+
+ # Get Gluster POD names which are located on the filtered nodes
+ get_pod_name_cmd = (
+ "oc get pods --all-namespaces "
+ "-o=custom-columns=:.metadata.name,:.spec.nodeName,:.status.hostIP | "
+ "grep 'glusterfs-' | grep -e '%s '" % "' -e '".join(
+ gluster_pod_node_names)
+ )
+ out = command.cmd_run(
+ get_pod_name_cmd, hostname=ocp_node)
+ data = []
+ for line in out.split("\n"):
+ pod_name, host_name, host_ip = [
+ el.strip() for el in line.split(" ") if el.strip()]
+ data.append({
+ "pod_name": pod_name,
+ "host_name": host_name,
+ "host_ip": host_ip,
+ })
+ pod_count = len(data)
+ err_msg = "Expected 3 or more Gluster PODs to be found. Actual is '%s'" % (
+ pod_count)
+ assert (pod_count > 2), err_msg
+ return data
+
+
def get_gluster_vol_info_by_pvc_name(ocp_node, pvc_name):
"""Get Gluster volume info based on the PVC name.
diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
index e1351c93..a85553ec 100644
--- a/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
+++ b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
@@ -1,9 +1,7 @@
import time
+from unittest import skip
from cnslibs.common.dynamic_provisioning import (
- create_mongodb_pod,
- create_secret_file,
- create_storage_class_file,
get_pvc_status,
get_pod_name_from_dc,
verify_pvc_status_is_bound,
@@ -13,12 +11,12 @@ from cnslibs.common.exceptions import ExecutionError
from cnslibs.common.heketi_ops import (
verify_volume_name_prefix)
from cnslibs.common.openshift_ops import (
- get_ocp_gluster_pod_names,
+ get_gluster_pod_names_by_pvc_name,
oc_create_secret,
oc_create_sc,
- oc_create,
oc_create_pvc,
oc_create_app_dc_with_io,
+ oc_create_tiny_pod_with_volume,
oc_delete,
oc_rsh,
scale_dc_pod_amount_and_wait,
@@ -32,81 +30,94 @@ class TestDynamicProvisioningP0(CnsBaseClass):
Class that contain P0 dynamic provisioning test cases for
glusterfile volume
'''
- def dynamic_provisioning_glusterfile(self, heketi_volname_prefix=False):
- storage_class = self.cns_storage_class['storage_class1']
+
+ def setUp(self):
+ super(TestDynamicProvisioningP0, self).setUp()
+ self.node = self.ocp_master_node[0]
+ self.sc = self.cns_storage_class['storage_class1']
+
+ def _create_storage_class(self, create_name_prefix=False):
+ sc = self.cns_storage_class['storage_class1']
secret = self.cns_secret['secret1']
- node = self.ocp_master_node[0]
- sc_name = storage_class['name']
- cmd = ("oc get svc %s "
- "-o=custom-columns=:.spec.clusterIP" % self.heketi_service_name)
- ret, out, err = g.run(node, cmd, "root")
- self.assertEqual(
- ret, 0, "failed to execute command %s on %s" % (cmd, node))
- heketi_cluster_ip = out.lstrip().strip()
- resturl = "http://%s:8080" % heketi_cluster_ip
- ret = create_storage_class_file(
- node,
- sc_name,
- resturl,
- storage_class['provisioner'],
- restuser=storage_class['restuser'],
- secretnamespace=storage_class['secretnamespace'],
- secretname=secret['secret_name'],
- **({"volumenameprefix": storage_class['volumenameprefix']}
- if heketi_volname_prefix else {}))
- self.assertTrue(ret, "creation of storage-class file failed")
- provisioner_name = storage_class['provisioner'].split("/")
- file_path = "/%s-%s-storage-class.yaml" % (
- sc_name, provisioner_name[1])
- oc_create(node, file_path)
- self.addCleanup(oc_delete, node, 'sc', sc_name)
- ret = create_secret_file(node,
- secret['secret_name'],
- secret['namespace'],
- self.secret_data_key,
- secret['type'])
- self.assertTrue(ret, "creation of heketi-secret file failed")
- oc_create(node, "/%s.yaml" % secret['secret_name'])
- self.addCleanup(oc_delete, node, 'secret', secret['secret_name'])
+
+        # Create the secret to be referenced by the storage class
+ self.secret_name = oc_create_secret(
+ self.node, namespace=secret['namespace'],
+ data_key=self.heketi_cli_key, secret_type=secret['type'])
+ self.addCleanup(
+ oc_delete, self.node, 'secret', self.secret_name)
+
+ # Create storage class
+ self.sc_name = oc_create_sc(
+ self.node,
+ resturl=sc['resturl'],
+ restuser=sc['restuser'], secretnamespace=sc['secretnamespace'],
+ secretname=self.secret_name,
+ **({"volumenameprefix": sc['volumenameprefix']}
+ if create_name_prefix else {})
+ )
+ self.addCleanup(oc_delete, self.node, 'sc', self.sc_name)
+
+ def _create_and_wait_for_pvcs(self, pvc_size=1,
+ pvc_name_prefix='autotests-pvc',
+ pvc_amount=1):
+ # Create PVCs
+ pvc_names = []
+ for i in range(pvc_amount):
+ pvc_name = oc_create_pvc(
+ self.node, self.sc_name, pvc_name_prefix=pvc_name_prefix,
+ pvc_size=pvc_size)
+ pvc_names.append(pvc_name)
+ self.addCleanup(
+ wait_for_resource_absence, self.node, 'pvc', pvc_name)
+ for pvc_name in pvc_names:
+ self.addCleanup(oc_delete, self.node, 'pvc', pvc_name)
+
+ # Wait for PVCs to be in bound state
+ for pvc_name in pvc_names:
+ verify_pvc_status_is_bound(self.node, pvc_name)
+
+ return pvc_names
+
+ def _create_and_wait_for_pvc(self, pvc_size=1,
+ pvc_name_prefix='autotests-pvc'):
+ self.pvc_name = self._create_and_wait_for_pvcs(
+ pvc_size=pvc_size, pvc_name_prefix=pvc_name_prefix)[0]
+ return self.pvc_name
+
+ def dynamic_provisioning_glusterfile(self, heketi_volname_prefix=False):
+ # Create secret and storage class
+ self._create_storage_class(heketi_volname_prefix)
# Create PVC
- pvc_name = oc_create_pvc(node, sc_name)
- self.addCleanup(wait_for_resource_absence, node, 'pvc', pvc_name)
- self.addCleanup(oc_delete, node, 'pvc', pvc_name)
- verify_pvc_status_is_bound(node, pvc_name)
+ pvc_name = self._create_and_wait_for_pvc()
# Create DC with POD and attached PVC to it.
- dc_name = oc_create_app_dc_with_io(self.ocp_master_node[0], pvc_name)
- self.addCleanup(oc_delete, node, 'dc', dc_name)
- self.addCleanup(scale_dc_pod_amount_and_wait, node, dc_name, 0)
+ dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
+ self.addCleanup(oc_delete, self.node, 'dc', dc_name)
+ self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
- pod_name = get_pod_name_from_dc(node, dc_name)
- wait_for_pod_be_ready(node, pod_name)
+ pod_name = get_pod_name_from_dc(self.node, dc_name)
+ wait_for_pod_be_ready(self.node, pod_name)
# Verify Heketi volume name for prefix presence if provided
if heketi_volname_prefix:
- ret = verify_volume_name_prefix(self.ocp_master_node[0],
- storage_class['volumenameprefix'],
- storage_class['secretnamespace'],
- pvc_name, resturl)
+ ret = verify_volume_name_prefix(self.node,
+ self.sc['volumenameprefix'],
+ self.sc['secretnamespace'],
+ pvc_name, self.sc['resturl'])
self.assertTrue(ret, "verify volnameprefix failed")
# Make sure we are able to work with files on the mounted volume
filepath = "/mnt/file_for_testing_io.log"
- cmd = "dd if=/dev/urandom of=%s bs=1K count=100" % filepath
- ret, out, err = oc_rsh(node, pod_name, cmd)
- self.assertEqual(
- ret, 0, "Failed to execute command %s on %s" % (cmd, node))
-
- cmd = "ls -lrt %s" % filepath
- ret, out, err = oc_rsh(node, pod_name, cmd)
- self.assertEqual(
- ret, 0, "Failed to execute command %s on %s" % (cmd, node))
-
- cmd = "rm -rf %s" % filepath
- ret, out, err = oc_rsh(node, pod_name, cmd)
- self.assertEqual(
- ret, 0, "Failed to execute command %s on %s" % (cmd, node))
+ for cmd in ("dd if=/dev/urandom of=%s bs=1K count=100",
+ "ls -lrt %s",
+ "rm -rf %s"):
+ cmd = cmd % filepath
+ ret, out, err = oc_rsh(self.node, pod_name, cmd)
+ self.assertEqual(
+ ret, 0,
+ "Failed to execute '%s' command on %s" % (cmd, self.node))
def test_dynamic_provisioning_glusterfile(self):
g.log.info("test_dynamic_provisioning_glusterfile")
@@ -117,234 +128,131 @@ class TestDynamicProvisioningP0(CnsBaseClass):
self.dynamic_provisioning_glusterfile(True)
def test_dynamic_provisioning_glusterfile_heketipod_failure(self):
- g.log.info("test_dynamic_provisioning_glusterfile_Heketipod_Failure")
- storage_class = self.cns_storage_class['storage_class1']
- secret = self.cns_secret['secret1']
- sc_name = storage_class['name']
- pvc_name2 = "mongodb2"
- cmd = ("oc get svc %s "
- "-o=custom-columns=:.spec.clusterIP" % self.heketi_service_name)
- ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
- self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
- cmd, self.ocp_master_node[0]))
- heketi_cluster_ip = out.lstrip().strip()
- resturl = "http://%s:8080" % heketi_cluster_ip
- ret = create_storage_class_file(
- self.ocp_master_node[0],
- sc_name,
- resturl,
- storage_class['provisioner'],
- restuser=storage_class['restuser'],
- secretnamespace=storage_class['secretnamespace'],
- secretname=secret['secret_name'])
- self.assertTrue(ret, "creation of storage-class file failed")
- provisioner_name = storage_class['provisioner'].split("/")
- file_path = "/%s-%s-storage-class.yaml" % (
- sc_name, provisioner_name[1])
- oc_create(self.ocp_master_node[0], file_path)
- self.addCleanup(oc_delete, self.ocp_master_node[0],
- 'sc', sc_name)
- ret = create_secret_file(self.ocp_master_node[0],
- secret['secret_name'],
- secret['namespace'],
- self.secret_data_key,
- secret['type'])
- self.assertTrue(ret, "creation of heketi-secret file failed")
- oc_create(self.ocp_master_node[0],
- "/%s.yaml" % secret['secret_name'])
- self.addCleanup(oc_delete, self.ocp_master_node[0], 'secret',
- secret['secret_name'])
-
- # Create App pod #1 and write data to it
- ret = create_mongodb_pod(self.ocp_master_node[0], pvc_name2,
- 10, sc_name)
- self.assertTrue(ret, "creation of mongodb pod failed")
- self.addCleanup(oc_delete, self.ocp_master_node[0], 'service',
- pvc_name2)
- self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc',
- pvc_name2)
- self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc',
- pvc_name2)
- pod_name = get_pod_name_from_dc(self.ocp_master_node[0], pvc_name2)
- ret = wait_for_pod_be_ready(self.ocp_master_node[0], pod_name,
- wait_step=5, timeout=300)
- self.assertTrue(ret, "verify mongodb pod status as running failed")
- cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file "
- "bs=1K count=100")
- ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
- self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
- cmd, self.ocp_master_node[0]))
+ mount_path = "/mnt"
+ datafile_path = '%s/fake_file_for_%s' % (mount_path, self.id())
+
+ # Create secret and storage class
+ self._create_storage_class()
+
+ # Create PVC
+ app_1_pvc_name = self._create_and_wait_for_pvc()
+
+ # Create app POD with attached volume
+ app_1_pod_name = oc_create_tiny_pod_with_volume(
+ self.node, app_1_pvc_name, "test-pvc-mount-on-app-pod",
+ mount_path=mount_path)
+ self.addCleanup(
+ wait_for_resource_absence, self.node, 'pod', app_1_pod_name)
+ self.addCleanup(oc_delete, self.node, 'pod', app_1_pod_name)
+
+        # Wait for the app POD to be up and running
+ wait_for_pod_be_ready(
+ self.node, app_1_pod_name, timeout=60, wait_step=2)
+
+ # Write data to the app POD
+ write_data_cmd = (
+ "dd if=/dev/urandom of=%s bs=1K count=100" % datafile_path)
+ ret, out, err = oc_rsh(self.node, app_1_pod_name, write_data_cmd)
+ self.assertEqual(
+ ret, 0,
+ "Failed to execute command %s on %s" % (write_data_cmd, self.node))
# Remove Heketi pod
heketi_down_cmd = "oc scale --replicas=0 dc/%s --namespace %s" % (
self.heketi_dc_name, self.cns_project_name)
heketi_up_cmd = "oc scale --replicas=1 dc/%s --namespace %s" % (
self.heketi_dc_name, self.cns_project_name)
- self.addCleanup(g.run, self.ocp_master_node[0], heketi_up_cmd, "root")
- ret, out, err = g.run(self.ocp_master_node[0], heketi_down_cmd, "root")
-
- get_heketi_podname_cmd = (
- "oc get pods --all-namespaces -o=custom-columns=:.metadata.name "
- "--no-headers=true "
- "--selector deploymentconfig=%s" % self.heketi_dc_name)
- ret, out, err = g.run(self.ocp_master_node[0], get_heketi_podname_cmd)
- wait_for_resource_absence(self.ocp_master_node[0], 'pod', out.strip())
-
- # Create App pod #2
- pvc_name3 = "mongodb3"
- ret = create_mongodb_pod(self.ocp_master_node[0],
- pvc_name3, 10, sc_name)
- self.assertTrue(ret, "creation of mongodb pod failed")
- self.addCleanup(oc_delete, self.ocp_master_node[0], 'service',
- pvc_name3)
- self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc',
- pvc_name3)
- self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc',
- pvc_name3)
- ret, status = get_pvc_status(self.ocp_master_node[0],
- pvc_name3)
- self.assertTrue(ret, "failed to get pvc status of %s" % pvc_name3)
- self.assertEqual(status, "Pending", "pvc status of "
- "%s is not in Pending state" % pvc_name3)
-
- # Bring Heketi pod back
- ret, out, err = g.run(self.ocp_master_node[0], heketi_up_cmd, "root")
- self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
- heketi_up_cmd, self.ocp_master_node[0]))
-
- # Wait small amount of time before newly scheduled Heketi POD appears
+ self.addCleanup(self.cmd_run, heketi_up_cmd)
+ heketi_pod_name = get_pod_name_from_dc(
+ self.node, self.heketi_dc_name, timeout=10, wait_step=3)
+ self.cmd_run(heketi_down_cmd)
+ wait_for_resource_absence(self.node, 'pod', heketi_pod_name)
+
+ # Create second PVC
+ app_2_pvc_name = oc_create_pvc(
+ self.node, self.sc_name, pvc_name_prefix='autotests-pvc',
+ pvc_size=1)
+ self.addCleanup(
+ wait_for_resource_absence, self.node, 'pvc', app_2_pvc_name)
+ self.addCleanup(oc_delete, self.node, 'pvc', app_2_pvc_name)
+
+        # Check the status of the second PVC after a short pause
time.sleep(2)
+ ret, status = get_pvc_status(self.node, app_2_pvc_name)
+ self.assertTrue(ret, "Failed to get pvc status of %s" % app_2_pvc_name)
+ self.assertEqual(
+ status, "Pending",
+ "PVC status of %s is not in Pending state" % app_2_pvc_name)
+
+ # Create second app POD
+ app_2_pod_name = oc_create_tiny_pod_with_volume(
+ self.node, app_2_pvc_name, "test-pvc-mount-on-app-pod",
+ mount_path=mount_path)
+ self.addCleanup(
+ wait_for_resource_absence, self.node, 'pod', app_2_pod_name)
+ self.addCleanup(oc_delete, self.node, 'pod', app_2_pod_name)
+
+ # Bring Heketi POD back
+ self.cmd_run(heketi_up_cmd)
# Wait for Heketi POD be up and running
- ret, out, err = g.run(self.ocp_master_node[0], get_heketi_podname_cmd)
- ret = wait_for_pod_be_ready(self.ocp_master_node[0], out.strip(),
- wait_step=5, timeout=120)
- self.assertTrue(ret, "verify heketi pod status as running failed")
-
- # Verify App pod #2
- cmd = ("oc get svc %s "
- "-o=custom-columns=:.spec.clusterIP" % self.heketi_service_name)
- ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
- self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
- cmd, self.ocp_master_node[0]))
- heketi_cluster_new_ip = out.lstrip().strip()
- if heketi_cluster_new_ip != heketi_cluster_ip:
- oc_delete(self.ocp_master_node[0], 'sc', sc_name)
- resturl = "http://%s:8080" % heketi_cluster_ip
- ret = create_storage_class_file(
- self.ocp_master_node[0],
- sc_name,
- resturl,
- storage_class['provisioner'],
- restuser=storage_class['restuser'],
- secretnamespace=storage_class['secretnamespace'],
- secretname=storage_class['secretname'])
- self.assertTrue(ret, "creation of storage-class file failed")
- provisioner_name = storage_class['provisioner'].split("/")
- file_path = "/%s-%s-storage-class.yaml" % (
- sc_name, provisioner_name[1])
- oc_create(self.ocp_master_node[0], file_path)
- for w in Waiter(600, 30):
- ret, status = get_pvc_status(self.ocp_master_node[0],
- pvc_name3)
- self.assertTrue(ret, "failed to get pvc status of %s" % (
- pvc_name3))
- if status != "Bound":
- g.log.info("pvc status of %s is not in Bound state,"
- " sleeping for 30 sec" % pvc_name3)
- continue
- else:
- break
- if w.expired:
- error_msg = ("exceeded timeout 600 sec, pvc %s not in"
- " Bound state" % pvc_name3)
- g.log.error(error_msg)
- raise ExecutionError(error_msg)
- self.assertEqual(status, "Bound", "pvc status of %s "
- "is not in Bound state, its state is %s" % (
- pvc_name3, status))
- pod_name = get_pod_name_from_dc(self.ocp_master_node[0], pvc_name3)
- ret = wait_for_pod_be_ready(self.ocp_master_node[0], pod_name,
- wait_step=5, timeout=300)
- self.assertTrue(ret, "verify %s pod status "
- "as running failed" % pvc_name3)
- cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file "
- "bs=1K count=100")
- ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
- self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
- cmd, self.ocp_master_node[0]))
+ new_heketi_pod_name = get_pod_name_from_dc(
+ self.node, self.heketi_dc_name, timeout=10, wait_step=2)
+ wait_for_pod_be_ready(
+ self.node, new_heketi_pod_name, wait_step=5, timeout=120)
+        # Wait for the second PVC and the app POD to become ready
+ verify_pvc_status_is_bound(self.node, app_2_pvc_name)
+ wait_for_pod_be_ready(
+ self.node, app_2_pod_name, timeout=60, wait_step=2)
+
+ # Verify that we are able to write data
+ ret, out, err = oc_rsh(self.node, app_2_pod_name, write_data_cmd)
+ self.assertEqual(
+ ret, 0,
+ "Failed to execute command %s on %s" % (write_data_cmd, self.node))
+
+ @skip("Blocked by BZ-1632873")
def test_dynamic_provisioning_glusterfile_glusterpod_failure(self):
- g.log.info("test_dynamic_provisioning_glusterfile_Glusterpod_Failure")
- storage_class = self.cns_storage_class['storage_class1']
- secret = self.cns_secret['secret1']
- sc_name = storage_class['name']
- pvc_name4 = "mongodb4"
- cmd = ("oc get svc %s "
- "-o=custom-columns=:.spec.clusterIP" % self.heketi_service_name)
- ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
- self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
- cmd, self.ocp_master_node[0]))
- heketi_cluster_ip = out.lstrip().strip()
- resturl = "http://%s:8080" % heketi_cluster_ip
- ret = create_storage_class_file(
- self.ocp_master_node[0],
- sc_name,
- resturl,
- storage_class['provisioner'],
- restuser=storage_class['restuser'],
- secretnamespace=storage_class['secretnamespace'],
- secretname=secret['secret_name'])
- self.assertTrue(ret, "creation of storage-class file failed")
- provisioner_name = storage_class['provisioner'].split("/")
- file_path = "/%s-%s-storage-class.yaml" % (
- sc_name, provisioner_name[1])
- oc_create(self.ocp_master_node[0], file_path)
- self.addCleanup(oc_delete, self.ocp_master_node[0],
- 'sc', sc_name)
- ret = create_secret_file(self.ocp_master_node[0],
- secret['secret_name'],
- secret['namespace'],
- self.secret_data_key,
- secret['type'])
- self.assertTrue(ret, "creation of heketi-secret file failed")
- oc_create(self.ocp_master_node[0],
- "/%s.yaml" % secret['secret_name'])
- self.addCleanup(oc_delete, self.ocp_master_node[0], 'secret',
- secret['secret_name'])
- ret = create_mongodb_pod(self.ocp_master_node[0],
- pvc_name4, 30, sc_name)
- self.assertTrue(ret, "creation of mongodb pod failed")
- self.addCleanup(oc_delete, self.ocp_master_node[0], 'service',
- pvc_name4)
- self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc',
- pvc_name4)
- self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc',
- pvc_name4)
- pod_name = get_pod_name_from_dc(self.ocp_master_node[0], pvc_name4)
- ret = wait_for_pod_be_ready(self.ocp_master_node[0], pod_name,
- wait_step=5, timeout=300)
- self.assertTrue(ret, "verify mongodb pod status as running failed")
- io_cmd = ("oc rsh %s dd if=/dev/urandom of=/var/lib/mongodb/data/file "
- "bs=1000K count=1000") % pod_name
- proc = g.run_async(self.ocp_master_node[0], io_cmd, "root")
- gluster_pod_list = get_ocp_gluster_pod_names(self.ocp_master_node[0])
- g.log.info("gluster_pod_list - %s" % gluster_pod_list)
- gluster_pod_name = gluster_pod_list[0]
- cmd = ("oc get pods -o wide | grep %s | grep -v deploy "
- "| awk '{print $7}'") % gluster_pod_name
- ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
- self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
- cmd, self.ocp_master_node[0]))
- gluster_pod_node_name = out.strip().split("\n")[0].strip()
- oc_delete(self.ocp_master_node[0], 'pod', gluster_pod_name)
+ mount_path = "/mnt"
+ datafile_path = '%s/fake_file_for_%s' % (mount_path, self.id())
+
+ # Create secret and storage class
+ self._create_storage_class()
+
+ # Create PVC
+ pvc_name = self._create_and_wait_for_pvc()
+
+ # Create app POD with attached volume
+ pod_name = oc_create_tiny_pod_with_volume(
+ self.node, pvc_name, "test-pvc-mount-on-app-pod",
+ mount_path=mount_path)
+ self.addCleanup(
+ wait_for_resource_absence, self.node, 'pod', pod_name)
+ self.addCleanup(oc_delete, self.node, 'pod', pod_name)
+
+        # Wait for the app POD to be up and running
+ wait_for_pod_be_ready(
+ self.node, pod_name, timeout=60, wait_step=2)
+
+ # Run IO in background
+ io_cmd = "oc rsh %s dd if=/dev/urandom of=%s bs=1000K count=900" % (
+ pod_name, datafile_path)
+ async_io = g.run_async(self.node, io_cmd, "root")
+
+        # Pick one of the hosts that store a brick of the PV (4+ node case)
+ gluster_pod_data = get_gluster_pod_names_by_pvc_name(
+ self.node, pvc_name)[0]
+
+        # Delete glusterfs POD from the chosen host and wait for a new one
+ oc_delete(self.node, 'pod', gluster_pod_data["pod_name"])
cmd = ("oc get pods -o wide | grep glusterfs | grep %s | "
"grep -v Terminating | awk '{print $1}'") % (
- gluster_pod_node_name)
+ gluster_pod_data["host_name"])
for w in Waiter(600, 30):
- ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ out = self.cmd_run(cmd)
new_gluster_pod_name = out.strip().split("\n")[0].strip()
- if ret == 0 and not new_gluster_pod_name:
+ if not new_gluster_pod_name:
continue
else:
break
@@ -352,17 +260,13 @@ class TestDynamicProvisioningP0(CnsBaseClass):
error_msg = "exceeded timeout, new gluster pod not created"
g.log.error(error_msg)
raise ExecutionError(error_msg)
- self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
- cmd, self.ocp_master_node[0]))
new_gluster_pod_name = out.strip().split("\n")[0].strip()
g.log.info("new gluster pod name is %s" % new_gluster_pod_name)
- ret = wait_for_pod_be_ready(self.ocp_master_node[0],
- new_gluster_pod_name)
- self.assertTrue(ret, "verify %s pod status as running "
- "failed" % new_gluster_pod_name)
- ret, out, err = proc.async_communicate()
- self.assertEqual(ret, 0, "IO %s failed on %s" % (io_cmd,
- self.ocp_master_node[0]))
+ wait_for_pod_be_ready(self.node, new_gluster_pod_name)
+
+ # Check that async IO was not interrupted
+ ret, out, err = async_io.async_communicate()
+ self.assertEqual(ret, 0, "IO %s failed on %s" % (io_cmd, self.node))
def test_storage_class_mandatory_params_glusterfile(self):
# CNS-442 storage-class mandatory parameters
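
The retry loop in the gluster-POD-failure test follows the repository's
Waiter polling idiom. A standalone sketch of that idiom is given below; the
Waiter import path is assumed, and cmd_run stands in for the test's
self.cmd_run helper:

# Sketch of the Waiter-based polling pattern, under the assumption that
# Waiter lives in cnslibs.common.waiter; cmd_run is a stand-in callable.
from cnslibs.common.exceptions import ExecutionError
from cnslibs.common.waiter import Waiter


def wait_for_new_gluster_pod(cmd_run, get_pods_cmd, timeout=600, interval=30):
    # Poll 'get_pods_cmd' until it reports a gluster POD name or time runs out.
    for w in Waiter(timeout, interval):
        out = cmd_run(get_pods_cmd)
        new_gluster_pod_name = out.strip().split("\n")[0].strip()
        if new_gluster_pod_name:
            return new_gluster_pod_name
    if w.expired:
        raise ExecutionError("exceeded timeout, new gluster pod not created")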