author    Arun Kumar <arukumar@arukumar.remote.csb>    2019-09-16 15:17:49 +0530
committer Arun <arukumar@redhat.com>                   2019-11-26 18:42:01 +0530
commit    f51ac495904dcd43649dbdc9a3997593aa8f9164 (patch)
tree      c4221031a71f45cb1c474b9675f5d7eb74491426
parent    1ed3d8401476b56e1f84cfdb9eb849f07cc80af8 (diff)
Add TC deleting a bunch of PVCs during network failure

Create a network failure while deleting PVCs. The network-side failure is
introduced by closing and then reopening the gluster-blockd port (24010)
on a gluster node.

Change-Id: Id3a749aa1a051bbce99b85046fa0a79831e85dd5
-rw-r--r--  openshift-storage-libs/openshiftstoragelibs/openshift_ops.py       | 16
-rw-r--r--  tests/functional/gluster_stability/test_gluster_block_stability.py | 80
2 files changed, 91 insertions(+), 5 deletions(-)
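
The failure-injection pattern the commit relies on is simple: the iptables
ACCEPT rule for the gluster-blockd port (24010) is removed for the duration of
the PVC deletions and then restored. A minimal sketch of that pattern, under
the assumption that the node_delete_iptables_rules/node_add_iptables_rules
helpers live in openshiftstoragelibs.node_ops (the hostname below is
hypothetical):

    # Sketch only; the helper import path and hostname are assumptions.
    from openshiftstoragelibs.node_ops import (
        node_add_iptables_rules,
        node_delete_iptables_rules,
    )

    gluster_node = 'gluster0.example.com'  # hypothetical gluster node
    chain = 'OS_FIREWALL_ALLOW'
    rules = '-p tcp -m state --state NEW -m tcp --dport 24010 -j ACCEPT'

    # Drop the ACCEPT rule so new connections to gluster-blockd fail ...
    node_delete_iptables_rules(gluster_node, chain, rules)
    try:
        pass  # ... delete PVCs while the port is unreachable ...
    finally:
        # ... then restore the rule so the cluster can recover.
        node_add_iptables_rules(gluster_node, chain, rules)
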
diff --git a/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py b/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py
index 56b06297..4e668d68 100644
--- a/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py
+++ b/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py
@@ -1358,7 +1358,7 @@ def get_pv_name_from_pvc(hostname, pvc_name):
     return pv_name
 
 
-def get_vol_names_from_pv(hostname, pv_name):
+def get_vol_names_from_pv(hostname, pv_name, vol_type='file'):
     '''
     Returns the heketi and gluster
     vol names of the corresponding PV
@@ -1368,6 +1368,7 @@ def get_vol_names_from_pv(hostname, pv_name):
         pv_name (str): pv_name for which we
                        want to find corresponding
                        vol names
+        vol_type (str): volume type block or file
     Returns:
         volname (dict): dict if successful
             {"heketi_vol": heketi_vol_name,
@@ -1377,10 +1378,15 @@ def get_vol_names_from_pv(hostname, pv_name):
         otherwise raise Exception
     '''
     vol_dict = {}
-    cmd = (r"oc get pv %s -o=custom-columns="
-           r":.metadata.annotations."
-           r"'gluster\.kubernetes\.io\/heketi\-volume\-id',"
-           r":.spec.glusterfs.path" % pv_name)
+    if vol_type == 'block':
+        cmd = (r"oc get pv %s -o=custom-columns="
+               r":.metadata.annotations.'gluster\.org\/volume-id',"
+               r":.metadata.annotations.glusterBlockShare" % pv_name)
+    else:
+        cmd = (r"oc get pv %s -o=custom-columns="
+               r":.metadata.annotations."
+               r"'gluster\.kubernetes\.io\/heketi\-volume\-id',"
+               r":.spec.glusterfs.path" % pv_name)
     vol_list = command.cmd_run(cmd, hostname=hostname).split()
     vol_dict = {"heketi_vol": vol_list[0],
                 "gluster_vol": vol_list[1]}
diff --git a/tests/functional/gluster_stability/test_gluster_block_stability.py b/tests/functional/gluster_stability/test_gluster_block_stability.py
index e81949f4..59852e98 100644
--- a/tests/functional/gluster_stability/test_gluster_block_stability.py
+++ b/tests/functional/gluster_stability/test_gluster_block_stability.py
@@ -37,6 +37,7 @@ from openshiftstoragelibs.openshift_ops import (
     get_ocp_gluster_pod_details,
     get_pod_name_from_dc,
     get_pv_name_from_pvc,
+    get_vol_names_from_pv,
     kill_service_on_gluster_pod_or_node,
     match_pv_and_heketi_block_volumes,
     oc_adm_manage_node,
@@ -1245,3 +1246,82 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
         self.verify_all_paths_are_up_in_multipath(
             list(mpaths)[0], hacount, ini_node, timeout=1)
+
+    def get_vol_id_and_vol_names_from_pvc_names(self, pvc_names):
+        vol_details = []
+        for pvc_name in pvc_names:
+            pv_name = get_pv_name_from_pvc(self.node, pvc_name)
+            volume = get_vol_names_from_pv(
+                self.node, pv_name, vol_type='block')
+            vol_details.append(volume)
+        return vol_details
+
+    def check_errors_in_heketi_pod_network_failure_after_deletion(
+            self, since_time, vol_names):
+        # Get the name of the heketi pod
+        heketi_pod_name = get_pod_name_from_dc(self.node, self.heketi_dc_name)
+
+        # Check for errors in the heketi pod logs
+        err_cmd = (
+            r'oc logs %s --since-time="%s" | grep " Failed to run command '
+            r'\[gluster-block delete*"' % (heketi_pod_name, since_time))
+
+        for w in Waiter(30, 5):
+            err_out = cmd_run(err_cmd, self.node)
+            if any(vol_name in err_out for vol_name in vol_names):
+                break
+        if w.expired:
+            err_msg = ("Expected ERROR for volumes '%s' not generated in "
+                       "heketi pod logs" % vol_names)
+            raise AssertionError(err_msg)
+
+    def test_delete_block_pvcs_with_network_failure(self):
+        """Block port 24010 while deleting PVCs"""
+        pvc_amount, pvc_delete_amount = 10, 5
+        gluster_node = self.gluster_servers[0]
+        chain = 'OS_FIREWALL_ALLOW'
+        rules = '-p tcp -m state --state NEW -m tcp --dport 24010 -j ACCEPT'
+
+        sc_name = self.create_storage_class(hacount=len(self.gluster_servers))
+
+        # Get the total free space available
+        initial_free_storage = get_total_free_space(
+            self.heketi_client_node, self.heketi_server_url)
+
+        # Create 10 PVCs, get their PV names and volume ids
+        pvc_names = self.create_and_wait_for_pvcs(
+            sc_name=sc_name, pvc_amount=pvc_amount, timeout=240)
+        vol_details = self.get_vol_id_and_vol_names_from_pvc_names(pvc_names)
+        vol_names = [vol_name['gluster_vol'] for vol_name in vol_details]
+
+        # Delete 5 PVCs without waiting for the results
+        oc_delete(self.node, 'pvc', " ".join(pvc_names[:pvc_delete_amount]))
+
+        # Get the time from which to collect logs
+        since_time = cmd_run(
+            'date -u --rfc-3339=ns | cut -d "+" -f 1', self.node).replace(
+            " ", "T") + "Z"
+
+        try:
+            # Close port 24010 on the gluster node, then delete the other
+            # 5 PVCs without waiting
+            node_delete_iptables_rules(gluster_node, chain, rules)
+            oc_delete(
+                self.node, 'pvc', ' '.join(pvc_names[pvc_delete_amount:]))
+            self.addCleanup(
+                wait_for_resources_absence,
+                self.node, 'pvc', pvc_names[pvc_delete_amount:])
+
+            # Check for errors in the heketi pod logs
+            self.check_errors_in_heketi_pod_network_failure_after_deletion(
+                since_time, vol_names[pvc_delete_amount:])
+        finally:
+            # Reopen port 24010
+            node_add_iptables_rules(gluster_node, chain, rules)
+
+        # Validate that the available free space is unchanged
+        final_free_storage = get_total_free_space(
+            self.heketi_client_node, self.heketi_server_url)
+        msg = ("Available free space %s is not the same as expected %s"
+               % (initial_free_storage, final_free_storage))
+        self.assertEqual(initial_free_storage, final_free_storage, msg)
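
The log check above uses the Waiter polling idiom common in these tests: probe
a condition on a fixed interval and fail once the time budget is exhausted. A
minimal sketch, assuming the Waiter class from openshiftstoragelibs.waiter
with the same 30s timeout and 5s interval as the test (condition_met is a
hypothetical stand-in for the heketi log grep):

    from openshiftstoragelibs.waiter import Waiter

    def condition_met():
        # Hypothetical check; the test greps the heketi pod logs here.
        return False

    for w in Waiter(timeout=30, interval=5):
        if condition_met():
            break
    if w.expired:
        raise AssertionError("condition not met within 30 seconds")
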