author     Nitin Goyal <nigoyal@redhat.com>          2019-07-15 16:02:20 +0530
committer  vponomar <vponomar@redhat.com>            2019-08-13 13:42:27 +0000
commit     45958781660802c7c8792c307fda05e43323cf20 (patch)
tree       d6a370b1f9c66b6744638a0352330336659aa43c /tests
parent     f0727d7266e0b826aa75f8babdd854bfb268505f (diff)
Add TC restart app pod when target node is down
The new TC ensures that the app pod is restarted properly when one of the target nodes is down. This patch also includes libraries for the vmware API; these libraries can perform VM operations, such as power on and off, via the vmware client APIs.

Change-Id: I11ad4dc3318cb0583de5882d8067ed7e30ea9962
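The vmware helpers used by the new test (find_vm_name_by_ip_or_hostname, power_off_vm_by_name, power_on_vm_by_name) live in openshiftstoragelibs.node_ops, and their implementation is not shown in this diff. Below is a minimal sketch of what such a power-off helper could look like on top of pyVmomi; this is an assumption, and unlike the real helper (which takes only the VM name and presumably reads vCenter credentials from the cloud_provider config added below), connection parameters are passed explicitly here to keep the sketch self-contained.

# Hypothetical sketch, assuming pyVmomi; the real
# openshiftstoragelibs.node_ops implementation is not part of this diff.
import ssl

from pyVim.connect import Disconnect, SmartConnect
from pyVim.task import WaitForTask
from pyVmomi import vim


def _find_vm(content, vm_name):
    # Walk the vCenter inventory looking for a VM with the given name.
    view = content.viewManager.CreateContainerView(
        content.rootFolder, [vim.VirtualMachine], True)
    try:
        for vm in view.view:
            if vm.name == vm_name:
                return vm
    finally:
        view.DestroyView()
    raise Exception("VM '%s' not found" % vm_name)


def power_off_vm_by_name(vm_name, hostname, username, password, port=443):
    # Skip TLS certificate validation, as is common in lab setups.
    ctx = ssl._create_unverified_context()
    si = SmartConnect(host=hostname, user=username, pwd=password,
                      port=port, sslContext=ctx)
    try:
        vm = _find_vm(si.RetrieveContent(), vm_name)
        if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
            # PowerOffVM_Task is asynchronous; block until it completes.
            WaitForTask(vm.PowerOffVM_Task())
    finally:
        Disconnect(si)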
Diffstat (limited to 'tests')
-rw-r--r--  tests/functional/gluster_stability/test_gluster_block_stability.py  80
-rw-r--r--  tests/glusterfs-containers-tests-config.yaml  10
2 files changed, 90 insertions, 0 deletions
diff --git a/tests/functional/gluster_stability/test_gluster_block_stability.py b/tests/functional/gluster_stability/test_gluster_block_stability.py
index 0232c790..acbec125 100644
--- a/tests/functional/gluster_stability/test_gluster_block_stability.py
+++ b/tests/functional/gluster_stability/test_gluster_block_stability.py
@@ -1,10 +1,23 @@
from openshiftstoragelibs.baseclass import GlusterBlockBaseClass
from openshiftstoragelibs.command import cmd_run
+from openshiftstoragelibs.exceptions import ConfigError
+from openshiftstoragelibs.heketi_ops import (
+ heketi_node_info,
+ heketi_node_list,
+)
+from openshiftstoragelibs.node_ops import (
+ find_vm_name_by_ip_or_hostname,
+ power_off_vm_by_name,
+ power_on_vm_by_name,
+)
from openshiftstoragelibs.openshift_ops import (
cmd_run_on_gluster_pod_or_node,
+ get_ocp_gluster_pod_details,
get_pod_name_from_dc,
+ get_pv_name_from_pvc,
oc_adm_manage_node,
oc_delete,
+ oc_get_custom_resource,
oc_get_schedulable_nodes,
oc_rsh,
wait_for_pod_be_ready,
@@ -263,3 +276,70 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
# Verify that all the paths are up
self.verify_all_paths_are_up_in_multipath(mpath, hacount, node)
+
+ def test_initiator_side_failure_restart_pod_when_target_node_is_down(self):
+ """Restart app pod when one gluster node is down"""
+ # Skip the test if requirements are not met
+ try:
+ vm_name = find_vm_name_by_ip_or_hostname(self.node)
+ except (NotImplementedError, ConfigError) as e:
+ self.skipTest(e)
+
+ # Get heketi node list
+ h_nodes_ids = heketi_node_list(
+ self.heketi_client_node, self.heketi_server_url)
+
+ # Get the IPs and hostnames of the gluster nodes from heketi
+ h_nodes = {}
+ for node in h_nodes_ids:
+ info = heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url, node,
+ json=True)
+ h_nodes[info['hostnames']['storage'][0]] = (
+ info['hostnames']['manage'][0])
+
+ pvc_name = self.create_and_wait_for_pvc()
+ pv_name = get_pv_name_from_pvc(self.node, pvc_name)
+
+ # Create app pod
+ dc_name, pod_name = self.create_dc_with_pvc(self.pvc_name)
+
+ iqn, hacount, p_node = self.verify_iscsi_sessions_and_multipath(
+ self.pvc_name, dc_name)
+
+ # Get list of containerized gluster nodes
+ g_nodes = get_ocp_gluster_pod_details(self.node)
+
+ # Get target portals for the PVC
+ targets = oc_get_custom_resource(
+ self.node, 'pv', ':.spec.iscsi.portals,:.spec.iscsi.targetPortal',
+ name=pv_name)
+ targets = [item.strip('[').strip(']')
+     for item in targets if isinstance(item, str)]
+
+ # Select hostname for powering off
+ if h_nodes[targets[0]] == p_node:
+ vm_hostname = h_nodes[targets[1]]
+ else:
+ vm_hostname = h_nodes[targets[0]]
+
+ # Find VM Name for powering it off
+ vm_name = find_vm_name_by_ip_or_hostname(vm_hostname)
+
+ # Make the node unschedulable if glusterfs is containerized
+ if g_nodes:
+ oc_adm_manage_node(self.node, '--schedulable=false', [vm_hostname])
+ self.addCleanup(
+ oc_adm_manage_node, self.node, '--schedulable', [vm_hostname])
+
+ # Power off gluster node
+ power_off_vm_by_name(vm_name)
+ self.addCleanup(power_on_vm_by_name, vm_name)
+
+ # Delete the pod so it gets respun
+ oc_delete(self.node, 'pod', pod_name)
+ wait_for_resource_absence(self.node, 'pod', pod_name)
+
+ # Wait for the pod to come up while one target node is down
+ pod_name = get_pod_name_from_dc(self.node, dc_name)
+ wait_for_pod_be_ready(self.node, pod_name, timeout=120, wait_step=5)
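For reference, a standalone sketch of the power-cycle flow the test relies on, using only the helpers imported at the top of this file; the hostname below is a made-up placeholder.

# Usage sketch of the node_ops helpers exercised by the test above.
from openshiftstoragelibs.node_ops import (
    find_vm_name_by_ip_or_hostname,
    power_off_vm_by_name,
    power_on_vm_by_name,
)

# 'gluster-node-1.example.com' is a hypothetical gluster node hostname.
vm_name = find_vm_name_by_ip_or_hostname('gluster-node-1.example.com')
power_off_vm_by_name(vm_name)
try:
    # ... exercise the failure scenario while the target node is down ...
    pass
finally:
    # Always power the node back on, mirroring the test's addCleanup.
    power_on_vm_by_name(vm_name)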
diff --git a/tests/glusterfs-containers-tests-config.yaml b/tests/glusterfs-containers-tests-config.yaml
index c69f3922..d92dbaac 100644
--- a/tests/glusterfs-containers-tests-config.yaml
+++ b/tests/glusterfs-containers-tests-config.yaml
@@ -59,3 +59,13 @@ openshift:
common:
stop_on_first_failure: False
heketi_command_timeout: 120
+
+cloud_provider:
+ name: '<fake-cloud-provider-name, e.g. vmware>'
+ vmware:
+ hostname: '<fake-hostname>'
+ username: '<fake-username>'
+ password: '<fake-password>'
+ port: 443
+ aws: # To be done in the future
+ libvirt: # To be done in the future
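How this new block is consumed is not shown in the diff; a minimal sketch, assuming the tests load the file with PyYAML and pick the section named by cloud_provider.name:

# Hypothetical config-loading sketch; the actual loader in
# openshiftstoragelibs is not part of this diff.
import yaml

with open('tests/glusterfs-containers-tests-config.yaml') as f:
    config = yaml.safe_load(f)

provider = config['cloud_provider']
name = provider['name']            # e.g. 'vmware'
creds = provider.get(name) or {}   # hostname, username, password, port
print('cloud provider: %s at %s' % (name, creds.get('hostname')))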