-rw-r--r--  openshift-storage-libs/openshiftstoragelibs/baseclass.py             52
-rw-r--r--  tests/functional/gluster_stability/test_gluster_block_stability.py  114
-rw-r--r--  tests/functional/provisioning/test_dynamic_provisioning_block.py    145
3 files changed, 166 insertions(+), 145 deletions(-)
diff --git a/openshift-storage-libs/openshiftstoragelibs/baseclass.py b/openshift-storage-libs/openshiftstoragelibs/baseclass.py
index 9b6976f6..8ca3a9af 100644
--- a/openshift-storage-libs/openshiftstoragelibs/baseclass.py
+++ b/openshift-storage-libs/openshiftstoragelibs/baseclass.py
@@ -11,6 +11,7 @@ from openshiftstoragelibs.exceptions import (
from openshiftstoragelibs.heketi_ops import (
hello_heketi,
heketi_blockvolume_delete,
+ heketi_blockvolume_info,
heketi_volume_delete,
)
from openshiftstoragelibs.openshift_ops import (
@@ -22,12 +23,19 @@ from openshiftstoragelibs.openshift_ops import (
oc_create_secret,
oc_delete,
oc_get_custom_resource,
+ oc_get_pods,
scale_dc_pod_amount_and_wait,
switch_oc_project,
verify_pvc_status_is_bound,
wait_for_pod_be_ready,
wait_for_resource_absence,
)
+from openshiftstoragelibs.openshift_storage_libs import (
+ get_iscsi_block_devices_by_path,
+ get_iscsi_session,
+ get_mpath_name_from_device_name,
+ validate_multipath_pod,
+)
from openshiftstoragelibs.openshift_version import get_openshift_version
@@ -327,3 +335,47 @@ class GlusterBlockBaseClass(BaseClass):
super(GlusterBlockBaseClass, cls).setUpClass()
cls.sc = cls.storage_classes.get(
'storage_class2', cls.storage_classes.get('block_storage_class'))
+
+ def verify_iscsi_sessions_and_multipath(self, pvc_name, dc_name):
+ # Get storage ips of glusterfs pods
+ keys = self.gluster_servers
+ gluster_ips = []
+ for key in keys:
+ gluster_ips.append(self.gluster_servers_info[key]['storage'])
+ gluster_ips.sort()
+
+ # Find iqn and hacount from volume info
+ pv_name = get_pv_name_from_pvc(self.node, pvc_name)
+ custom = [r':.metadata.annotations."gluster\.org\/volume\-id"']
+ vol_id = oc_get_custom_resource(self.node, 'pv', custom, pv_name)[0]
+ vol_info = heketi_blockvolume_info(
+ self.heketi_client_node, self.heketi_server_url, vol_id, json=True)
+ iqn = vol_info['blockvolume']['iqn']
+ hacount = int(self.sc['hacount'])
+
+ # Find node on which pod is running
+ pod_name = get_pod_name_from_dc(self.node, dc_name)
+ pod_info = oc_get_pods(
+ self.node, selector='deploymentconfig=%s' % dc_name)
+ node = pod_info[pod_name]['node']
+
+ # Get the iscsi sessions info from the node
+ iscsi = get_iscsi_session(node, iqn)
+ self.assertEqual(hacount, len(iscsi))
+ iscsi.sort()
+ self.assertEqual(set(iscsi), (set(gluster_ips) & set(iscsi)))
+
+ # Get the paths info from the node
+ devices = get_iscsi_block_devices_by_path(node, iqn).keys()
+ self.assertEqual(hacount, len(devices))
+
+        # Get mpath names and verify that there is only one mpath
+ mpaths = set()
+ for device in devices:
+ mpaths.add(get_mpath_name_from_device_name(node, device))
+ self.assertEqual(1, len(mpaths))
+
+ validate_multipath_pod(
+ self.node, pod_name, hacount, mpath=list(mpaths)[0])
+
+ return iqn, hacount, node
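For reference, the relocated helper is meant to be called from block tests
once an app pod is up. A minimal usage sketch, assuming a subclass of
GlusterBlockBaseClass with the same storage-class/PVC/DC helpers used by the
tests below (the test name itself is hypothetical):

    from openshiftstoragelibs.baseclass import GlusterBlockBaseClass

    class TestMultipathSketch(GlusterBlockBaseClass):
        def test_verify_sessions_for_new_pvc(self):  # hypothetical name
            self.create_storage_class()
            self.create_and_wait_for_pvc()
            dc_name, pod_name = self.create_dc_with_pvc(self.pvc_name)
            # Asserts 'hacount' iSCSI sessions and block devices plus a
            # single mpath device, then returns identifiers for follow-up
            # checks (e.g. verifying logout after the pod moves).
            iqn, hacount, node = self.verify_iscsi_sessions_and_multipath(
                self.pvc_name, dc_name)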
diff --git a/tests/functional/gluster_stability/test_gluster_block_stability.py b/tests/functional/gluster_stability/test_gluster_block_stability.py
new file mode 100644
index 00000000..562fb8be
--- /dev/null
+++ b/tests/functional/gluster_stability/test_gluster_block_stability.py
@@ -0,0 +1,114 @@
+from openshiftstoragelibs.baseclass import GlusterBlockBaseClass
+from openshiftstoragelibs.command import cmd_run
+from openshiftstoragelibs.openshift_ops import (
+ get_pod_name_from_dc,
+ oc_adm_manage_node,
+ oc_delete,
+ oc_get_schedulable_nodes,
+ wait_for_pod_be_ready,
+ wait_for_resource_absence,
+)
+from openshiftstoragelibs.openshift_storage_libs import (
+ get_iscsi_session,
+)
+
+
+class TestGlusterBlockStability(GlusterBlockBaseClass):
+    '''Class that contains gluster-block stability test cases'''
+
+ def setUp(self):
+ super(TestGlusterBlockStability, self).setUp()
+ self.node = self.ocp_master_node[0]
+
+ def initiator_side_failures(self):
+ self.create_storage_class()
+ self.create_and_wait_for_pvc()
+
+ # Create app pod
+ dc_name, pod_name = self.create_dc_with_pvc(self.pvc_name)
+
+ iqn, _, node = self.verify_iscsi_sessions_and_multipath(
+ self.pvc_name, dc_name)
+
+        # Make the node where the pod is running unschedulable
+ oc_adm_manage_node(
+ self.node, '--schedulable=false', nodes=[node])
+
+        # Register cleanup to make the node schedulable again
+ self.addCleanup(
+ oc_adm_manage_node, self.node, '--schedulable=true',
+ nodes=[node])
+
+        # Delete the pod so it gets respun on another node
+ oc_delete(self.node, 'pod', pod_name)
+ wait_for_resource_absence(self.node, 'pod', pod_name)
+
+ # Wait for pod to come up
+ pod_name = get_pod_name_from_dc(self.node, dc_name)
+ wait_for_pod_be_ready(self.node, pod_name)
+
+ # Get the iscsi session from the previous node to verify logout
+ iscsi = get_iscsi_session(node, iqn, raise_on_error=False)
+ self.assertFalse(iscsi)
+
+ self.verify_iscsi_sessions_and_multipath(self.pvc_name, dc_name)
+
+ def test_initiator_side_failures_initiator_and_target_on_different_node(
+ self):
+
+ nodes = oc_get_schedulable_nodes(self.node)
+
+ # Get list of all gluster nodes
+ cmd = ("oc get pods --no-headers -l glusterfs-node=pod "
+ "-o=custom-columns=:.spec.nodeName")
+ g_nodes = cmd_run(cmd, self.node)
+ g_nodes = g_nodes.split('\n') if g_nodes else g_nodes
+
+        # Skip the test if fewer than two schedulable non-Gluster nodes exist
+        if len(set(nodes) - set(g_nodes)) < 2:
+            self.skipTest("skipping test case because it needs at least "
+                          "two schedulable nodes besides the Gluster nodes")
+
+ # Make containerized Gluster nodes unschedulable
+ if g_nodes:
+ # Make gluster nodes unschedulable
+ oc_adm_manage_node(
+ self.node, '--schedulable=false',
+ nodes=g_nodes)
+
+            # Register cleanup to make the Gluster nodes schedulable again
+ self.addCleanup(
+ oc_adm_manage_node, self.node, '--schedulable=true',
+ nodes=g_nodes)
+
+ self.initiator_side_failures()
+
+ def test_initiator_side_failures_initiator_and_target_on_same_node(self):
+ # Note: This test case is supported for containerized gluster only.
+
+ nodes = oc_get_schedulable_nodes(self.node)
+
+ # Get list of all gluster nodes
+ cmd = ("oc get pods --no-headers -l glusterfs-node=pod "
+ "-o=custom-columns=:.spec.nodeName")
+ g_nodes = cmd_run(cmd, self.node)
+ g_nodes = g_nodes.split('\n') if g_nodes else g_nodes
+
+ # Get the list of nodes other than gluster
+        o_nodes = list(set(nodes) - set(g_nodes))
+
+        # Skip the test case on a CRS setup (no containerized Gluster pods)
+        if not g_nodes:
+            self.skipTest("skipping test case because this is not a "
+                          "containerized gluster setup; "
+                          "it applies to containerized gluster only.")
+
+ # Make other nodes unschedulable
+ oc_adm_manage_node(
+ self.node, '--schedulable=false', nodes=o_nodes)
+
+        # Register cleanup to make the other nodes schedulable again
+ self.addCleanup(
+ oc_adm_manage_node, self.node, '--schedulable=true', nodes=o_nodes)
+
+ self.initiator_side_failures()
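Both tests above rely on the same cordon/uncordon pattern: nodes are made
unschedulable, and the cleanup that restores scheduling is registered
immediately afterwards, so a failing assertion later cannot leave the
cluster cordoned. A condensed sketch of the pattern (cordon_nodes is a
hypothetical helper; oc_adm_manage_node and addCleanup are the calls used
above):

    from openshiftstoragelibs.openshift_ops import oc_adm_manage_node

    def cordon_nodes(test, ocp_node, nodes):
        # Hypothetical helper condensing the pattern used in both tests:
        # cordon first, then immediately register the uncordon cleanup so
        # scheduling is restored even if the test fails midway.
        oc_adm_manage_node(ocp_node, '--schedulable=false', nodes=nodes)
        test.addCleanup(
            oc_adm_manage_node, ocp_node, '--schedulable=true', nodes=nodes)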
diff --git a/tests/functional/provisioning/test_dynamic_provisioning_block.py b/tests/functional/provisioning/test_dynamic_provisioning_block.py
index 367eb317..ae7198db 100644
--- a/tests/functional/provisioning/test_dynamic_provisioning_block.py
+++ b/tests/functional/provisioning/test_dynamic_provisioning_block.py
@@ -4,19 +4,12 @@ from unittest import skip
from glusto.core import Glusto as g
from openshiftstoragelibs.baseclass import GlusterBlockBaseClass
-from openshiftstoragelibs.openshift_storage_libs import (
- get_iscsi_block_devices_by_path,
- get_iscsi_session,
- get_mpath_name_from_device_name,
- validate_multipath_pod,
-)
from openshiftstoragelibs.command import cmd_run
from openshiftstoragelibs.exceptions import ExecutionError
from openshiftstoragelibs.heketi_ops import (
get_block_hosting_volume_list,
heketi_blockvolume_create,
heketi_blockvolume_delete,
- heketi_blockvolume_info,
heketi_blockvolume_list,
heketi_node_info,
heketi_node_list,
@@ -31,13 +24,11 @@ from openshiftstoragelibs.openshift_ops import (
get_gluster_pod_names_by_pvc_name,
get_pod_name_from_dc,
get_pv_name_from_pvc,
- oc_adm_manage_node,
oc_create_app_dc_with_io,
oc_create_pvc,
oc_delete,
oc_get_custom_resource,
oc_get_pods,
- oc_get_schedulable_nodes,
oc_rsh,
scale_dc_pod_amount_and_wait,
verify_pvc_status_is_bound,
@@ -380,142 +371,6 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
oc_delete(self.node, 'pv', pv_name)
wait_for_resource_absence(self.node, 'pv', pv_name)
- def verify_iscsi_and_multipath(self, pvc_name, dc_name):
- # Get storage ips of glusterfs pods
- keys = self.gluster_servers
- gluster_ips = []
- for key in keys:
- gluster_ips.append(self.gluster_servers_info[key]['storage'])
- gluster_ips.sort()
-
- # Find iqn and hacount from volume info
- pv_name = get_pv_name_from_pvc(self.node, pvc_name)
- custom = [r':.metadata.annotations."gluster\.org\/volume\-id"']
- vol_id = oc_get_custom_resource(self.node, 'pv', custom, pv_name)[0]
- vol_info = heketi_blockvolume_info(
- self.heketi_client_node, self.heketi_server_url, vol_id, json=True)
- iqn = vol_info['blockvolume']['iqn']
- hacount = int(self.sc['hacount'])
-
- # Find node on which pod is running
- pod_name = get_pod_name_from_dc(self.node, dc_name)
- pod_info = oc_get_pods(
- self.node, selector='deploymentconfig=%s' % dc_name)
- node = pod_info[pod_name]['node']
-
- # Get the iscsi sessions info from the node
- iscsi = get_iscsi_session(node, iqn)
- self.assertEqual(hacount, len(iscsi))
- iscsi.sort()
- self.assertEqual(set(iscsi), (set(gluster_ips) & set(iscsi)))
-
- # Get the paths info from the node
- devices = get_iscsi_block_devices_by_path(node, iqn).keys()
- self.assertEqual(hacount, len(devices))
-
- # Get mpath names and verify that only one mpath is there
- mpaths = set()
- for device in devices:
- mpaths.add(get_mpath_name_from_device_name(node, device))
- self.assertEqual(1, len(mpaths))
-
- validate_multipath_pod(
- self.node, pod_name, hacount, mpath=list(mpaths)[0])
-
- return iqn, hacount, node
-
- def initiator_side_failures(self):
- self.create_storage_class()
- self.create_and_wait_for_pvc()
-
- # Create app pod
- dc_name, pod_name = self.create_dc_with_pvc(self.pvc_name)
-
- iqn, _, node = self.verify_iscsi_and_multipath(self.pvc_name, dc_name)
-
- # Make node unschedulabe where pod is running
- oc_adm_manage_node(
- self.node, '--schedulable=false', nodes=[node])
-
- # Make node schedulabe where pod is running
- self.addCleanup(
- oc_adm_manage_node, self.node, '--schedulable=true',
- nodes=[node])
-
- # Delete pod so it get respun on any other node
- oc_delete(self.node, 'pod', pod_name)
- wait_for_resource_absence(self.node, 'pod', pod_name)
-
- # Wait for pod to come up
- pod_name = get_pod_name_from_dc(self.node, dc_name)
- wait_for_pod_be_ready(self.node, pod_name)
-
- # Get the iscsi session from the previous node to verify logout
- iscsi = get_iscsi_session(node, iqn, raise_on_error=False)
- self.assertFalse(iscsi)
-
- self.verify_iscsi_and_multipath(self.pvc_name, dc_name)
-
- def test_initiator_side_failures_initiator_and_target_on_different_node(
- self):
-
- nodes = oc_get_schedulable_nodes(self.node)
-
- # Get list of all gluster nodes
- cmd = ("oc get pods --no-headers -l glusterfs-node=pod "
- "-o=custom-columns=:.spec.nodeName")
- g_nodes = cmd_run(cmd, self.node)
- g_nodes = g_nodes.split('\n') if g_nodes else g_nodes
-
- # Skip test case if required schedulable node count not met
- if len(set(nodes) - set(g_nodes)) < 2:
- self.skipTest("skipping test case because it needs at least two"
- " nodes schedulable")
-
- # Make containerized Gluster nodes unschedulable
- if g_nodes:
- # Make gluster nodes unschedulable
- oc_adm_manage_node(
- self.node, '--schedulable=false',
- nodes=g_nodes)
-
- # Make gluster nodes schedulable
- self.addCleanup(
- oc_adm_manage_node, self.node, '--schedulable=true',
- nodes=g_nodes)
-
- self.initiator_side_failures()
-
- def test_initiator_side_failures_initiator_and_target_on_same_node(self):
- # Note: This test case is supported for containerized gluster only.
-
- nodes = oc_get_schedulable_nodes(self.node)
-
- # Get list of all gluster nodes
- cmd = ("oc get pods --no-headers -l glusterfs-node=pod "
- "-o=custom-columns=:.spec.nodeName")
- g_nodes = cmd_run(cmd, self.node)
- g_nodes = g_nodes.split('\n') if g_nodes else g_nodes
-
- # Get the list of nodes other than gluster
- o_nodes = list((set(nodes) - set(g_nodes)))
-
- # Skip the test case if it is crs setup
- if not g_nodes:
- self.skipTest("skipping test case because it is not a "
- "containerized gluster setup. "
- "This test case is for containerized gluster only.")
-
- # Make other nodes unschedulable
- oc_adm_manage_node(
- self.node, '--schedulable=false', nodes=o_nodes)
-
- # Make other nodes schedulable
- self.addCleanup(
- oc_adm_manage_node, self.node, '--schedulable=true', nodes=o_nodes)
-
- self.initiator_side_failures()
-
def verify_free_space(self, free_space):
# verify free space on nodes otherwise skip test case
node_list = heketi_node_list(
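One detail worth noting from the relocated initiator_side_failures flow: the
logout check calls get_iscsi_session with raise_on_error=False, so an absent
session comes back as a falsy value instead of raising. A condensed sketch
of that check (assert_iscsi_logout is a hypothetical helper):

    from openshiftstoragelibs.openshift_storage_libs import get_iscsi_session

    def assert_iscsi_logout(test, node, iqn):
        # Hypothetical helper mirroring the logout verification above:
        # with raise_on_error=False, a missing session yields an empty
        # result rather than an exception, which assertFalse expects.
        iscsi = get_iscsi_session(node, iqn, raise_on_error=False)
        test.assertFalse(iscsi)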