author     Arun Kumar <arukumar@redhat.com>   2019-07-12 15:00:48 +0530
committer  Arun <arukumar@redhat.com>         2019-11-08 15:49:27 +0530
commit     51eb333432e3d98dea7283a013ef6b4dfd93f5bb (patch)
tree       1c790a60d3726e3ce612577599c691e3fa323e86
parent     c2813c9d15d0ca5379a69c468c4586a623e5613a (diff)
Add TC creating a bunch of PVCs during network failure

Create a network-side failure by closing and re-opening the port related to
gluster-blockd during creation of the PVCs, verify the PVCs get bound, and
validate multipath.

Change-Id: Ibc53a13e2abb8674661da83d5881a13bbb2ad7fb
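The "network side failure" is simulated purely with iptables: the ACCEPT rule for gluster-blockd's TCP port 24010 is removed from the gluster node's OS_FIREWALL_ALLOW chain while the PVC requests are in flight, and re-added afterwards. As a rough illustration only (the node_ops helpers used in the test run the equivalent commands on the remote gluster node; their internals are not part of this change), the toggling amounts to something like:

# Hypothetical local illustration of the port toggling; the actual
# node_delete_iptables_rules/node_add_iptables_rules helpers execute the
# commands on the gluster node itself.
import subprocess

CHAIN = "OS_FIREWALL_ALLOW"
RULE = "-p tcp -m state --state NEW -m tcp --dport 24010 -j ACCEPT"


def block_gluster_blockd():
    # Dropping the ACCEPT rule leaves new connections to port 24010 to be
    # rejected by the rest of the chain, i.e. the simulated network failure.
    subprocess.run("iptables -D %s %s" % (CHAIN, RULE), shell=True, check=True)


def unblock_gluster_blockd():
    # Re-insert the rule at the top of the chain to restore connectivity
    # (the insertion position is an assumption, not taken from the helpers).
    subprocess.run("iptables -I %s %s" % (CHAIN, RULE), shell=True, check=True)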
-rw-r--r--  openshift-storage-libs/openshiftstoragelibs/baseclass.py            | 36
-rw-r--r--  tests/functional/gluster_stability/test_gluster_block_stability.py  | 36
2 files changed, 66 insertions(+), 6 deletions(-)
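The baseclass.py change below adds a skip_waiting flag to create_and_wait_for_pvcs() and a create_pvcs_not_waiting() wrapper, so a test can issue a batch of PVC requests, inject a failure, and only later wait for binding. A minimal usage sketch under those assumptions (hypothetical test class; the wait_for_pvcs_be_bound import path is assumed, and omitting sc_name lets the base class create a storage class automatically):

from openshiftstoragelibs.baseclass import BaseClass
# Import path assumed; wait_for_pvcs_be_bound is the helper used in the diff.
from openshiftstoragelibs.openshift_ops import wait_for_pvcs_be_bound


class TestPvcBatchDuringFailure(BaseClass):  # hypothetical example class

    def test_pvcs_bind_after_outage(self):
        # Fire off the PVC requests without blocking on the Bound state.
        pvc_names = self.create_pvcs_not_waiting(pvc_amount=5)

        # ... inject the failure while the PVC requests are still pending ...

        # Defer the bind check until the cluster should have recovered;
        # wait_for_pvcs_be_bound() is called with its default timeout here.
        wait_for_pvcs_be_bound(self.ocp_client[0], pvc_names)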
diff --git a/openshift-storage-libs/openshiftstoragelibs/baseclass.py b/openshift-storage-libs/openshiftstoragelibs/baseclass.py
index cd9f24ca..ad9c45ae 100644
--- a/openshift-storage-libs/openshiftstoragelibs/baseclass.py
+++ b/openshift-storage-libs/openshiftstoragelibs/baseclass.py
@@ -274,10 +274,25 @@ class BaseClass(unittest.TestCase):
     def get_block_provisioner_for_sc(self):
         return get_block_provisioner(self.ocp_client[0])

-    def create_and_wait_for_pvcs(self, pvc_size=1,
-                                 pvc_name_prefix="autotests-pvc",
-                                 pvc_amount=1, sc_name=None,
-                                 timeout=120, wait_step=3):
+    def create_and_wait_for_pvcs(
+            self, pvc_size=1, pvc_name_prefix="autotests-pvc", pvc_amount=1,
+            sc_name=None, timeout=120, wait_step=3, skip_waiting=False):
+        """Create multiple PVCs, optionally without waiting for them to bind.
+
+        Args:
+            pvc_size (int): size of each PVC, default value is 1
+            pvc_name_prefix (str): name prefix for each PVC, default value
+                is 'autotests-pvc'
+            pvc_amount (int): number of PVCs to create, default value is 1
+            sc_name (str): storage class to create the PVCs with, default is
+                None, which causes a storage class to be created automatically
+            timeout (int): timeout for the PVCs to reach the Bound state
+            wait_step (int): interval between PVC status checks
+            skip_waiting (bool): whether to skip waiting for the PVCs to
+                reach the Bound state
+        Returns:
+            list: list of PVC names
+        """
         node = self.ocp_client[0]

         # Create storage class if not specified
@@ -299,7 +314,8 @@ class BaseClass(unittest.TestCase):

         # Wait for PVCs to be in bound state
         try:
-            wait_for_pvcs_be_bound(node, pvc_names, timeout, wait_step)
+            if not skip_waiting:
+                wait_for_pvcs_be_bound(node, pvc_names, timeout, wait_step)
         finally:
             if get_openshift_version() < "3.9":
                 reclaim_policy = "Delete"
@@ -310,6 +326,8 @@ class BaseClass(unittest.TestCase):
             for pvc_name in pvc_names:
                 if reclaim_policy == 'Retain':
                     pv_name = get_pv_name_from_pvc(node, pvc_name)
+                    if not pv_name and skip_waiting:
+                        continue
                     self.addCleanup(oc_delete, node, 'pv', pv_name,
                                     raise_on_absence=False)
                     custom = (r':.metadata.annotations."gluster\.kubernetes'
@@ -328,7 +346,6 @@ class BaseClass(unittest.TestCase):
                                 raise_on_error=False)
                 self.addCleanup(oc_delete, node, 'pvc', pvc_name,
                                 raise_on_absence=False)
-
         return pvc_names

     def create_and_wait_for_pvc(self, pvc_size=1,
@@ -338,6 +355,13 @@ class BaseClass(unittest.TestCase):
         )[0]
         return self.pvc_name

+    def create_pvcs_not_waiting(
+            self, pvc_size=1, pvc_name_prefix="autotests-pvc",
+            pvc_amount=1, sc_name=None):
+        return self.create_and_wait_for_pvcs(
+            pvc_size=pvc_size, pvc_name_prefix=pvc_name_prefix,
+            pvc_amount=pvc_amount, sc_name=sc_name, skip_waiting=True)
+
     def create_dcs_with_pvc(self, pvc_names, timeout=600, wait_step=5):
         """Create bunch of DCs with app PODs which use unique PVCs.
diff --git a/tests/functional/gluster_stability/test_gluster_block_stability.py b/tests/functional/gluster_stability/test_gluster_block_stability.py
index 3907d0aa..20d07c0f 100644
--- a/tests/functional/gluster_stability/test_gluster_block_stability.py
+++ b/tests/functional/gluster_stability/test_gluster_block_stability.py
@@ -26,6 +26,8 @@ from openshiftstoragelibs.heketi_ops import (
 )
 from openshiftstoragelibs.node_ops import (
     find_vm_name_by_ip_or_hostname,
+    node_add_iptables_rules,
+    node_delete_iptables_rules,
     node_reboot_by_command,
     power_off_vm_by_name,
     power_on_vm_by_name,
@@ -1052,3 +1054,37 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
         heketi_blockvolume_delete(
             self.heketi_client_node, self.heketi_server_url,
             block_volume["id"])
+
+    def test_create_block_pvcs_with_network_failure(self):
+        """Block port 24010 while creating PVCs, run I/O and verify
+        multipath"""
+        chain = 'OS_FIREWALL_ALLOW'
+        rules = '-p tcp -m state --state NEW -m tcp --dport 24010 -j ACCEPT'
+        sc_name = self.create_storage_class(hacount=len(self.gluster_servers))
+        self.create_and_wait_for_pvc(sc_name=sc_name)
+
+        # Create an app pod, validate multipath and run I/O
+        dc_name, pod_name = self.create_dc_with_pvc(self.pvc_name)
+        self.verify_iscsi_sessions_and_multipath(self.pvc_name, dc_name)
+        cmd_run_io = 'dd if=/dev/urandom of=/mnt/%s bs=4k count=10000'
+        oc_rsh(self.node, pod_name, cmd_run_io % 'file1')
+
+        # Create 5 PVCs while closing the port, and run I/O in parallel
+        pvc_names_for_creations = self.create_pvcs_not_waiting(
+            pvc_amount=5, sc_name=sc_name)
+        try:
+            node_delete_iptables_rules(self.gluster_servers[0], chain, rules)
+            oc_rsh(self.node, pod_name, cmd_run_io % 'file2')
+        finally:
+            # Open the closed port
+            node_add_iptables_rules(self.gluster_servers[0], chain, rules)
+
+        # Wait for the PVCs to get bound
+        wait_for_pvcs_be_bound(self.node, pvc_names_for_creations)
+
+        # Create app pods and validate multipath
+        self.verify_iscsi_sessions_and_multipath(self.pvc_name, dc_name)
+        dc_and_pod_names = self.create_dcs_with_pvc(pvc_names_for_creations)
+        for pvc_name, dc_with_pod in dc_and_pod_names.items():
+            self.verify_iscsi_sessions_and_multipath(pvc_name, dc_with_pod[0])
+            oc_rsh(self.node, dc_with_pod[1], cmd_run_io % 'file3')
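A small probe that could complement the test (purely illustrative, not part of this change): checking gluster-blockd's port directly makes it possible to fail fast if the iptables rule removal never took effect on the gluster node.

import socket


def gluster_blockd_port_reachable(host, port=24010, timeout=3):
    """Return True if a TCP connection to gluster-blockd succeeds."""
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError:
        return False

For example, asserting not gluster_blockd_port_reachable(self.gluster_servers[0]) right after node_delete_iptables_rules(...) would distinguish a genuine bind-during-outage from a run where the port was never actually blocked.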