author    adityaramteke <adityaramteke05icr@gmail.com>  2021-01-18 10:06:47 +0530
committer Vaibhav Mahajan <vamahaja@redhat.com>          2021-02-05 12:08:16 +0000
commit    e13e922170f75e5794f3d99e02c35cc11887fd28 (patch)
tree      208361e1dac8b8e4a3deeec7d966c161d3485e71
parent    29b2da6f3fa64641411bfa7f72c474fbc846ccbe (diff)
[Test] Add TC to validate node down during brick evict operation
Change-Id: I6df2fac551dd53edce25af15680d053ebb089721
Signed-off-by: Aditya Ramteke <aramteke@redhat.com>
-rw-r--r--  tests/functional/heketi/test_heketi_brick_evict.py  90
1 file changed, 90 insertions, 0 deletions
diff --git a/tests/functional/heketi/test_heketi_brick_evict.py b/tests/functional/heketi/test_heketi_brick_evict.py
index 27cc1eb..1cba24c 100644
--- a/tests/functional/heketi/test_heketi_brick_evict.py
+++ b/tests/functional/heketi/test_heketi_brick_evict.py
@@ -1,11 +1,16 @@
import pytest
from glustolibs.gluster import volume_ops
+import six
from openshiftstoragelibs.baseclass import BaseClass
+from openshiftstoragelibs import exceptions
from openshiftstoragelibs import heketi_ops
from openshiftstoragelibs import heketi_version
+from openshiftstoragelibs import node_ops
+from openshiftstoragelibs import openshift_ops
from openshiftstoragelibs import podcmd
+from openshiftstoragelibs import waiter
class TestHeketiBrickEvict(BaseClass):
@@ -20,6 +25,8 @@ class TestHeketiBrickEvict(BaseClass):
                "heketi-client package {} does not support brick evict".format(
                    version.v_str))
+        self.ocp_client = self.ocp_master_node[0]
+
        node_list = heketi_ops.heketi_node_list(
            self.heketi_client_node, self.heketi_server_url)
@@ -88,3 +95,86 @@ class TestHeketiBrickEvict(BaseClass):
            bricks_new, gbricks, "gluster vol info and heketi vol info "
            "mismatched after brick evict {} \n {}".format(
                gvol_info, vol_info_new))
+
+    def _wait_for_gluster_pod_after_node_reboot(self, node_hostname):
+        """Wait for glusterfs pod to be ready after node reboot"""
+        openshift_ops.wait_for_ocp_node_be_ready(
+            self.ocp_client, node_hostname)
+        gluster_pod = openshift_ops.get_gluster_pod_name_for_specific_node(
+            self.ocp_client, node_hostname)
+        openshift_ops.wait_for_pod_be_ready(self.ocp_client, gluster_pod)
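+        # glusterd, gluster-blockd and tcmu-runner should be active and
+        # running after the reboot; gluster-block-target is expected to be
+        # active but exited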
+        services = (
+            ("glusterd", "running"), ("gluster-blockd", "running"),
+            ("tcmu-runner", "running"), ("gluster-block-target", "exited"))
+        for service, state in services:
+            openshift_ops.check_service_status_on_pod(
+                self.ocp_client, gluster_pod, service, "active", state)
+
+    @pytest.mark.tier4
+    def test_brick_evict_with_node_down(self):
+        """Test brick evict basic functionality and verify brick evict
+        after node down"""
+
+        h_node, h_server = self.heketi_client_node, self.heketi_server_url
+
+        # Disable any nodes beyond the first three
+        node_list = heketi_ops.heketi_node_list(h_node, h_server)
+        if len(node_list) > 3:
+            for node_id in node_list[3:]:
+                heketi_ops.heketi_node_disable(h_node, h_server, node_id)
+                self.addCleanup(
+                    heketi_ops.heketi_node_enable, h_node, h_server, node_id)
+
+        # Create heketi volume
+        vol_info = heketi_ops.heketi_volume_create(
+            h_node, h_server, 1, json=True)
+        self.addCleanup(
+            heketi_ops.heketi_volume_delete,
+            h_node, h_server, vol_info.get('id'))
+
+        # Get node on which heketi pod is scheduled
+        heketi_pod = openshift_ops.get_pod_name_from_dc(
+            self.ocp_client, self.heketi_dc_name)
+        heketi_node = openshift_ops.oc_get_custom_resource(
+            self.ocp_client, 'pod', ':.spec.nodeName', heketi_pod)[0]
+
+        # Get the list of hostnames for the disabled node ids
+        host_list = []
+        for node_id in node_list[3:]:
+            node_info = heketi_ops.heketi_node_info(
+                h_node, h_server, node_id, json=True)
+            host_list.append(node_info.get('hostnames').get('manage')[0])
+
+        # Get a brick id on a glusterfs node which is not the heketi node
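+        # (powering off the node that runs the heketi pod would make heketi
+        # itself unreachable for the evict call below)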
+        for node in vol_info.get('bricks', {}):
+            node_info = heketi_ops.heketi_node_info(
+                h_node, h_server, node.get('node'), json=True)
+            hostname = node_info.get('hostnames').get('manage')[0]
+            if (hostname != heketi_node) and (hostname not in host_list):
+                brick_id = node.get('id')
+                break
+
+        # Bring down the glusterfs node
+        vm_name = node_ops.find_vm_name_by_ip_or_hostname(hostname)
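+        # addCleanup callbacks run LIFO: the VM is powered back on first and
+        # only then do we wait for the gluster pod to come up again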
+        self.addCleanup(
+            self._wait_for_gluster_pod_after_node_reboot, hostname)
+        self.addCleanup(node_ops.power_on_vm_by_name, vm_name)
+        node_ops.power_off_vm_by_name(vm_name)
+
+        # Wait for the glusterfs node to become NotReady
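+        # the node's Ready condition reports 'True', 'False' or 'Unknown'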
+        custom = r'":.status.conditions[?(@.type==\"Ready\")]".status'
+        for w in waiter.Waiter(300, 20):
+            status = openshift_ops.oc_get_custom_resource(
+                self.ocp_client, 'node', custom, hostname)
+            if status[0] in ['False', 'Unknown']:
+                break
+        if w.expired:
+            raise exceptions.ExecutionError(
+                "Failed to bring down node {}".format(hostname))
+
+        # Perform brick evict operation
+        try:
+            heketi_ops.heketi_brick_evict(h_node, h_server, brick_id)
+        except AssertionError as e:
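+            # with one of the three enabled nodes powered off, heketi has no
+            # node left to place a replacement brick on, so this failure is
+            # expected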
+            if ('No Replacement was found' not in six.text_type(e)):
+                raise