From 74034b474648667e5d6242191130093a8399286e Mon Sep 17 00:00:00 2001 From: vamahaja Date: Fri, 24 Apr 2020 16:38:29 +0530 Subject: [TestFix] Fix test case to create volume after rebooting node Fix consists of - Currently 'test_heketi_volume_provision_after_node_reboot' tc uses 'node_reboot_by_command' function which reboots node by using 'shutdown -r' command and comes up fast, which can cause the volume to be created successfully even when more than 2 nodes are rebooted. Use power off and power on node libraries to reboot gluster nodes instead. Change-Id: Ie35a5f5be0bca6899d06d80de368491d2e1f3611 Signed-off-by: vamahaja --- tests/functional/heketi/test_volume_creation.py | 34 ++++++++++--------------- 1 file changed, 13 insertions(+), 21 deletions(-) diff --git a/tests/functional/heketi/test_volume_creation.py b/tests/functional/heketi/test_volume_creation.py index 0778d19c..acedc1ba 100644 --- a/tests/functional/heketi/test_volume_creation.py +++ b/tests/functional/heketi/test_volume_creation.py @@ -5,9 +5,9 @@ import pytest import six from openshiftstoragelibs.baseclass import BaseClass +from openshiftstoragelibs import exceptions from openshiftstoragelibs import heketi_ops from openshiftstoragelibs import node_ops -from openshiftstoragelibs import openshift_ops from openshiftstoragelibs import podcmd from openshiftstoragelibs import utils @@ -414,6 +414,12 @@ class TestVolumeCreationTestCases(BaseClass): @pytest.mark.tier2 def test_heketi_volume_provision_after_node_reboot(self): """Provision volume before and after node reboot""" + # Skip test if not able to connect to Cloud Provider + try: + node_ops.find_vm_name_by_ip_or_hostname(self.node) + except (NotImplementedError, exceptions.ConfigError) as e: + self.skipTest(e) + h_client, h_server = self.heketi_client_node, self.heketi_server_url g_nodes = [ g_node["manage"] @@ -426,9 +432,10 @@ class TestVolumeCreationTestCases(BaseClass): heketi_ops.heketi_volume_delete, h_client, h_server, vol_info['id']) - # Reboot gluster 
server nodes + # Power off gluster server nodes for g_node in g_nodes: - node_ops.node_reboot_by_command(g_node, wait_for_connection=False) + vm_name = node_ops.find_vm_name_by_ip_or_hostname(g_node) + self.power_off_gluster_node_vm(vm_name, g_node) # Create heketi volume when gluster nodes are down with self.assertRaises(AssertionError): @@ -438,25 +445,10 @@ class TestVolumeCreationTestCases(BaseClass): heketi_ops.heketi_volume_delete, h_client, h_server, vol_info['id']) + # Power on gluster server nodes for g_node in g_nodes: - node_ops.wait_for_ssh_connection(g_node) - - # Wait for the gluster pods and nodes to be ready - if self.is_containerized_gluster(): - for g_node in g_nodes: - openshift_ops.wait_for_ocp_node_be_ready(self.node, g_node) - gluster_pod_details = ( - openshift_ops.get_ocp_gluster_pod_details(self.node)) - gluster_pod = list( - filter( - lambda pod: (pod["pod_hostname"] == g_node), - gluster_pod_details)) - openshift_ops.wait_for_pod_be_ready( - self.node, gluster_pod[0]["pod_name"]) - - for g_node in g_nodes: - openshift_ops.wait_for_service_status_on_gluster_pod_or_node( - self.node, 'glusterd', 'active', 'running', g_node) + vm_name = node_ops.find_vm_name_by_ip_or_hostname(g_node) + self.power_on_gluster_node_vm(vm_name, g_node) # Try to create heketi volume after reboot vol_info = heketi_ops.heketi_volume_create( -- cgit