summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSushil Gupta <susgupta@redhat.com>2020-08-26 12:07:18 +0530
committerSushil Gupta <susgupta@redhat.com>2020-09-10 04:24:38 +0530
commitb7bd21e4d0f24a0de267a2d569b216da87a9888a (patch)
treede0e9c12a05c14273b53be85ef9fb90deaec1480
parentc82a3789f70a1aa5caae7187072350a1c741e119 (diff)
[Test] Add TC to verify dev path on file & block pvc with vol reboot
Change-Id: Ie072f0365a3d4cf71142e07f6387c8653a616945 Signed-off-by: Sushil Gupta <susgupta@redhat.com>
-rw-r--r--openshift-storage-libs/openshiftstoragelibs/baseclass.py4
-rw-r--r--tests/functional/provisioning/test_dev_path_mapping_block.py101
-rw-r--r--tests/functional/provisioning/test_dev_path_mapping_file.py97
3 files changed, 201 insertions, 1 deletions
diff --git a/openshift-storage-libs/openshiftstoragelibs/baseclass.py b/openshift-storage-libs/openshiftstoragelibs/baseclass.py
index 1204570..81327c1 100644
--- a/openshift-storage-libs/openshiftstoragelibs/baseclass.py
+++ b/openshift-storage-libs/openshiftstoragelibs/baseclass.py
@@ -433,7 +433,7 @@ class BaseClass(unittest.TestCase):
def create_dcs_with_pvc(
self, pvc_names, timeout=600, wait_step=5,
- dc_name_prefix='autotests-dc', label=None,
+ dc_name_prefix='autotests-dc', space_to_use=1048576, label=None,
skip_cleanup=False, is_busybox=False):
"""Create bunch of DCs with app PODs which use unique PVCs.
@@ -443,6 +443,7 @@ class BaseClass(unittest.TestCase):
timeout (int): timeout value, default value is 600 seconds.
wait_step( int): wait step, default value is 5 seconds.
dc_name_prefix(str): name prefix for deployement config.
+ space_to_use(int): space to use for io's in KB.
label (dict): keys and value for adding label into DC.
is_busybox (bool): True for busybox app pod else default is False
Returns: dictionary with following structure:
@@ -461,6 +462,7 @@ class BaseClass(unittest.TestCase):
oc_create_app_dc_with_io)
for pvc_name in pvc_names:
dc_name = function(self.ocp_client[0], pvc_name,
+ space_to_use=space_to_use,
dc_name_prefix=dc_name_prefix, label=label)
dc_names[pvc_name] = dc_name
if not skip_cleanup:
diff --git a/tests/functional/provisioning/test_dev_path_mapping_block.py b/tests/functional/provisioning/test_dev_path_mapping_block.py
index b512913..2edbbef 100644
--- a/tests/functional/provisioning/test_dev_path_mapping_block.py
+++ b/tests/functional/provisioning/test_dev_path_mapping_block.py
@@ -3,8 +3,10 @@ import pytest
from glusto.core import Glusto as g
from openshiftstoragelibs import baseclass
+from openshiftstoragelibs import command
from openshiftstoragelibs import heketi_ops
from openshiftstoragelibs import node_ops
+from openshiftstoragelibs import openshift_ops
from openshiftstoragelibs import openshift_storage_libs
from openshiftstoragelibs import podcmd
@@ -86,3 +88,102 @@ class TestDevPathMapping(baseclass.GlusterBlockBaseClass):
self.create_dcs_with_pvc(pvcs)
self.validate_block_volumes_count(
self.h_node, self.h_server, self.node_ip)
+
+ def _get_space_use_percent_in_app_pod(self, pod_name):
+ """Check if IO's are running in the app pod"""
+
+ use_percent = []
+ cmd = "oc exec {} -- df -h /mnt | tail -1"
+
+ # Run 10 times to track the percentage used
+ for _ in range(10):
+ out = command.cmd_run(cmd.format(pod_name), self.node).split()[3]
+ self.assertTrue(
+ out, "Failed to fetch mount point details from the pod "
+ "{}".format(pod_name))
+ use_percent.append(out[:-1])
+ return use_percent
+
+ def _create_app_pod_and_verify_pvs(self):
+ """Create block volume with app pod and verify IO's. Compare path,
+ uuid, vg_name.
+ """
+ pvc_size, pvc_amount = 2, 1
+
+ # Space to use for io's in KB
+ space_to_use = 104857600
+
+ # Create block volumes
+ pvc_name = self.create_and_wait_for_pvcs(
+ pvc_size=pvc_size, pvc_amount=pvc_amount)
+
+ # Create dcs and app pods with I/O on it
+ dc_name = self.create_dcs_with_pvc(pvc_name, space_to_use=space_to_use)
+
+ # Pod names list
+ pod_name = [pod_name for _, pod_name in list(dc_name.values())][0]
+ self.assertTrue(
+ pod_name, "Failed to get the pod name from {}".format(dc_name))
+
+ # Fetch dc list
+ dc_name = [pod for pod, _ in list(dc_name.values())][0]
+
+ pvs_info_before = openshift_storage_libs.get_pvs_info(
+ self.node, self.node_ip, self.devices_list, raise_on_error=False)
+
+ # Check if IO's are running
+ use_percent_before = self._get_space_use_percent_in_app_pod(pod_name)
+
+ # Compare volumes
+ self.validate_file_volumes_count(
+ self.h_node, self.h_server, self.node_ip)
+ self.detach_and_attach_vmdk(
+ self.vm_name, self.node_hostname, self.devices_list)
+
+ # Check if IO's are running
+ use_percent_after = self._get_space_use_percent_in_app_pod(pod_name)
+ self.assertNotEqual(
+ use_percent_before, use_percent_after,
+ "Failed to execute IO's in the app pod {}".format(
+ pod_name))
+
+ pvs_info_after = openshift_storage_libs.get_pvs_info(
+ self.node, self.node_ip, self.devices_list, raise_on_error=False)
+
+ # Compare pvs info before and after
+ for (path, uuid, vg_name), (_path, _uuid, _vg_name) in zip(
+ pvs_info_before[:-1], pvs_info_after[1:]):
+ self.assertEqual(
+ uuid, _uuid, "pv_uuid check failed. Expected: {},"
+ " Actual: {}".format(uuid, _uuid))
+ self.assertEqual(
+ vg_name, _vg_name, "vg_name check failed. Expected: {},"
+ " Actual:{}".format(vg_name, _vg_name))
+ return pod_name, dc_name, use_percent_before
+
+ @pytest.mark.tier2
+ @podcmd.GlustoPod()
+ def test_dev_path_mapping_app_pod_with_block_volume_reboot(self):
+ """Validate dev path mapping for app pods with block volume after reboot
+ """
+ # Create block volume with app pod and verify IO's
+ # and Compare path, uuid, vg_name
+ pod_name, dc_name, use_percent = self._create_app_pod_and_verify_pvs()
+
+ # Delete app pods
+ openshift_ops.oc_delete(self.node, 'pod', pod_name)
+ openshift_ops.wait_for_resource_absence(self.node, 'pod', pod_name)
+
+ # Wait for the new app pod to come up
+ # dc_name = [pod for pod, _ in list(dc_name.values())][0]
+ self.assertTrue(
+ dc_name, "Failed to get the dc name from {}".format(dc_name))
+ pod_name = openshift_ops.get_pod_name_from_dc(self.node, dc_name)
+ openshift_ops.wait_for_pod_be_ready(self.node, pod_name)
+
+ # Check if IO's are running after respin of app pod
+ use_percent_after = self._get_space_use_percent_in_app_pod(pod_name)
+ self.assertNotEqual(
+ use_percent, use_percent_after,
+ "Failed to execute IO's in the app pod {} after respin".format(
+ pod_name))
diff --git a/tests/functional/provisioning/test_dev_path_mapping_file.py b/tests/functional/provisioning/test_dev_path_mapping_file.py
index 57d7b34..bee37d6 100644
--- a/tests/functional/provisioning/test_dev_path_mapping_file.py
+++ b/tests/functional/provisioning/test_dev_path_mapping_file.py
@@ -3,8 +3,10 @@ import pytest
from glusto.core import Glusto as g
from openshiftstoragelibs import baseclass
+from openshiftstoragelibs import command
from openshiftstoragelibs import heketi_ops
from openshiftstoragelibs import node_ops
+from openshiftstoragelibs import openshift_ops
from openshiftstoragelibs import openshift_storage_libs
from openshiftstoragelibs import podcmd
@@ -86,3 +88,98 @@ class TestDevPathMapping(baseclass.BaseClass):
self.create_dcs_with_pvc(pvcs)
self.validate_file_volumes_count(
self.h_node, self.h_server, self.node_ip)
+
+ def _get_space_use_percent_in_app_pod(self, pod_name):
+ """Check if IO's are running in the app pod"""
+
+ use_percent = []
+ cmd = "oc exec {} -- df -h /mnt | tail -1"
+
+ # Run 10 times to track the percentage used
+ for _ in range(10):
+ out = command.cmd_run(cmd.format(pod_name), self.node).split()[3]
+ self.assertTrue(
+ out, "Failed to fetch mount point details from the pod "
+ "{}".format(pod_name))
+ use_percent.append(out[:-1])
+ return use_percent
+
+ def _create_app_pod_and_verify_pvs(self):
+ """Create file volume with app pod and verify IO's. Compare path,
+ uuid, vg_name.
+ """
+ pvc_size, pvc_amount = 2, 1
+
+ # Space to use for io's in KB
+ space_to_use = 104857600
+
+ # Create file volumes
+ pvc_name = self.create_and_wait_for_pvcs(
+ pvc_size=pvc_size, pvc_amount=pvc_amount)
+
+ # Create dcs and app pods with I/O on it
+ dc_name = self.create_dcs_with_pvc(pvc_name, space_to_use=space_to_use)
+
+ # Pod names list
+ pod_name = [pod_name for _, pod_name in list(dc_name.values())][0]
+ self.assertTrue(
+ pod_name, "Failed to get the pod name from {}".format(dc_name))
+ pvs_info_before = openshift_storage_libs.get_pvs_info(
+ self.node, self.node_ip, self.devices_list, raise_on_error=False)
+
+ # Check if IO's are running
+ use_percent_before = self._get_space_use_percent_in_app_pod(pod_name)
+
+ # Compare volumes
+ self.validate_file_volumes_count(
+ self.h_node, self.h_server, self.node_ip)
+ self.detach_and_attach_vmdk(
+ self.vm_name, self.node_hostname, self.devices_list)
+
+ # Check if IO's are running
+ use_percent_after = self._get_space_use_percent_in_app_pod(pod_name)
+ self.assertNotEqual(
+ use_percent_before, use_percent_after,
+ "Failed to execute IO's in the app pod {}".format(
+ pod_name))
+
+ pvs_info_after = openshift_storage_libs.get_pvs_info(
+ self.node, self.node_ip, self.devices_list, raise_on_error=False)
+
+ # Compare pvs info before and after
+ for (path, uuid, vg_name), (_path, _uuid, _vg_name) in zip(
+ pvs_info_before[:-1], pvs_info_after[1:]):
+ self.assertEqual(
+ uuid, _uuid, "pv_uuid check failed. Expected: {},"
+ " Actual: {}".format(uuid, _uuid))
+ self.assertEqual(
+ vg_name, _vg_name, "vg_name check failed. Expected: {},"
+ " Actual:{}".format(vg_name, _vg_name))
+ return pod_name, dc_name, use_percent_before
+
+ @pytest.mark.tier2
+ @podcmd.GlustoPod()
+ def test_dev_path_mapping_app_pod_with_file_volume_reboot(self):
+ """Validate dev path mapping for app pods with file volume after reboot
+ """
+ # Create file volume with app pod and verify IO's
+ # and Compare path, uuid, vg_name
+ pod_name, dc_name, use_percent = self._create_app_pod_and_verify_pvs()
+
+ # Delete app pods
+ openshift_ops.oc_delete(self.node, 'pod', pod_name)
+ openshift_ops.wait_for_resource_absence(self.node, 'pod', pod_name)
+
+ # Wait for the new app pod to come up
+ dc_name = [pod for pod, _ in list(dc_name.values())][0]
+ self.assertTrue(
+ dc_name, "Failed to get the dc name from {}".format(dc_name))
+ pod_name = openshift_ops.get_pod_name_from_dc(self.node, dc_name)
+ openshift_ops.wait_for_pod_be_ready(self.node, pod_name)
+
+ # Check if IO's are running after respin of app pod
+ use_percent_after = self._get_space_use_percent_in_app_pod(pod_name)
+ self.assertNotEqual(
+ use_percent, use_percent_after,
+ "Failed to execute IO's in the app pod {} after respin".format(
+ pod_name))