-rw-r--r--  openshift-storage-libs/openshiftstoragelibs/openshift_ops.py |  9
-rw-r--r--  tests/functional/logging/test_logging_validations.py         | 80
2 files changed, 86 insertions, 3 deletions
diff --git a/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py b/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py
index 0ed293d7..a228e190 100644
--- a/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py
+++ b/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py
@@ -465,7 +465,7 @@ def oc_create_tiny_pod_with_volume(hostname, pvc_name, pod_name_prefix='',
 def oc_delete(
         ocp_node, rtype, name, raise_on_absence=True, collect_logs=False,
-        skip_res_validation=True):
+        skip_res_validation=True, is_force=False):
     """Delete an OCP resource by name
     Args:
@@ -478,6 +478,7 @@ def oc_delete(
             default value: True
         collect_logs (bool): Collect logs before deleting resource
         skip_res_validation(bool): To validate before deletion of resource.
+        is_force (bool): If True, delete forcefully, default is False
     """
     if skip_res_validation and not oc_get_yaml(
             ocp_node, rtype, name, raise_on_error=raise_on_absence):
@@ -493,6 +494,10 @@ def oc_delete(
     if openshift_version.get_openshift_version() >= '3.11':
         cmd.append('--wait=false')
+    # Forcefully delete
+    if is_force:
+        cmd.append("--grace-period 0 --force")
+
     command.cmd_run(cmd, hostname=ocp_node)
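
Note: a minimal caller-side sketch of the new is_force flag (the test change
below uses the same pattern). Assuming command.cmd_run joins the list into a
single shell command line, as the existing '--wait=false' handling already
relies on, the forced variant roughly expands to
"oc delete pod <name> --wait=false --grace-period 0 --force".

    # Hedged sketch, not part of this patch: force-delete a pod and wait
    # for it to disappear before looking up its replacement.
    oc_delete(ocp_node, 'pod', pod_name, is_force=True)
    wait_for_resource_absence(ocp_node, 'pod', pod_name)
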
@@ -1068,7 +1073,7 @@ def wait_for_pod_be_ready(hostname, pod_name,
             g.log.info("pod %s is in ready state and is "
                        "Running" % pod_name)
             return True
-        elif output[1] == "Error":
+        elif output[1] in ["Error", "CrashLoopBackOff"]:
             msg = ("pod %s status error" % pod_name)
             g.log.error(msg)
             raise exceptions.ExecutionError(msg)
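
Note: with this change a pod stuck in "Error" or "CrashLoopBackOff" fails fast
with an ExecutionError instead of consuming the whole timeout. Callers that can
recover are expected to catch it, as the test added below does; a hedged sketch
of that pattern:

    # Sketch only: fall back to a forced respin when the pod never gets ready.
    try:
        wait_for_pod_be_ready(hostname, pod_name, timeout=300)
    except exceptions.ExecutionError:
        oc_delete(hostname, 'pod', pod_name, is_force=True)
        wait_for_resource_absence(hostname, 'pod', pod_name)
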
diff --git a/tests/functional/logging/test_logging_validations.py b/tests/functional/logging/test_logging_validations.py
index 63346f0a..a160fd7a 100644
--- a/tests/functional/logging/test_logging_validations.py
+++ b/tests/functional/logging/test_logging_validations.py
@@ -112,7 +112,7 @@ class TestLoggingAndGlusterRegistryValidation(GlusterBlockBaseClass):
     @pytest.mark.tier3
     def test_validate_logging_pods_and_pvc(self):
-        """Validate metrics pods and PVC"""
+        """Validate logging pods and PVC"""
         # Wait for kibana pod to be ready
         kibana_pod = openshift_ops.get_pod_name_from_dc(
@@ -274,3 +274,81 @@ class TestLoggingAndGlusterRegistryValidation(GlusterBlockBaseClass):
         openshift_ops.oc_rsh(self._master, es_pod, cmd_run_io)
         self.addCleanup(
             openshift_ops.oc_rsh, self._master, es_pod, cmd_remove_file)
+
+    def _delete_and_wait_for_new_es_pod_to_come_up(self):
+
+        # Force delete and wait for es pod to come up
+        openshift_ops.switch_oc_project(
+            self._master, self._logging_project_name)
+        pod_name = openshift_ops.get_pod_name_from_dc(
+            self._master, self._logging_es_dc)
+        openshift_ops.oc_delete(self._master, 'pod', pod_name, is_force=True)
+        openshift_ops.wait_for_resource_absence(self._master, 'pod', pod_name)
+        new_pod_name = openshift_ops.get_pod_name_from_dc(
+            self._master, self._logging_es_dc)
+        openshift_ops.wait_for_pod_be_ready(
+            self._master, new_pod_name, timeout=1800)
+
+    @pytest.mark.tier2
+    @ddt.data('delete', 'drain')
+    def test_respin_es_pod(self, motive):
+        """Validate respin of the elasticsearch pod"""
+
+        # Get the pod name and PVC name
+        es_pod = openshift_ops.get_pod_name_from_dc(
+            self._master, self._logging_es_dc)
+        pvc_custom = ":.spec.volumes[*].persistentVolumeClaim.claimName"
+        pvc_name = openshift_ops.oc_get_custom_resource(
+            self._master, "pod", pvc_custom, es_pod)[0]
+
+        # Validate iscsi and multipath
+        _, _, node = self.verify_iscsi_sessions_and_multipath(
+            pvc_name, self._logging_es_dc,
+            heketi_server_url=self._registry_heketi_server_url,
+            is_registry_gluster=True)
+        if motive == 'delete':
+
+            # Delete the es pod
+            self.addCleanup(self._delete_and_wait_for_new_es_pod_to_come_up)
+            openshift_ops.oc_delete(self._master, "pod", es_pod)
+        elif motive == 'drain':
+
+            # Get the number of infra nodes
+            infra_node_count_cmd = (
+                'oc get nodes '
+                '--no-headers -l node-role.kubernetes.io/infra=true|wc -l')
+            infra_node_count = command.cmd_run(
+                infra_node_count_cmd, self._master)
+
+            # Skip the test if the number of infra nodes is less than 2
+            if int(infra_node_count) < 2:
+                self.skipTest(
+                    'Available number of infra nodes is {}, at least 2 '
+                    'are required'.format(infra_node_count))
+
+            # Cleanup to make node schedulable
+            cmd_schedule = (
+                'oc adm manage-node {} --schedulable=true'.format(node))
+            self.addCleanup(
+                command.cmd_run, cmd_schedule, hostname=self._master)
+
+            # Drain the node
+            drain_cmd = ('oc adm drain {} --force=true --ignore-daemonsets '
+                         '--delete-local-data'.format(node))
+            command.cmd_run(drain_cmd, hostname=self._master)
+
+        # Wait for pod to get absent
+        openshift_ops.wait_for_resource_absence(self._master, "pod", es_pod)
+
+        # Wait for new pod to come up
+        try:
+            pod_name = openshift_ops.get_pod_name_from_dc(
+                self._master, self._logging_es_dc)
+            openshift_ops.wait_for_pod_be_ready(self._master, pod_name)
+        except exceptions.ExecutionError:
+            self._delete_and_wait_for_new_es_pod_to_come_up()
+
+        # Validate iscsi and multipath
+        self.verify_iscsi_sessions_and_multipath(
+            pvc_name, self._logging_es_dc,
+            heketi_server_url=self._registry_heketi_server_url,
+            is_registry_gluster=True)
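
Note: @ddt.data('delete', 'drain') expands this into two generated test cases
(the patch adds no ddt import, so the module and class are assumed to be
ddt-enabled already). A hedged sketch of selecting one variant; ddt's generated
names usually follow the <test>_<index>_<datapoint> pattern, so the exact ids
may differ:

    # Illustrative invocation only:
    #   pytest tests/functional/logging/test_logging_validations.py \
    #       -k test_respin_es_pod_2_drain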