summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorvamahaja <vamahaja@redhat.com>2019-11-06 11:14:17 +0530
committervamahaja <vamahaja@redhat.com>2019-11-19 12:57:33 +0530
commita9b27974c785db4d52ec16faaaa4778d26c42dbb (patch)
treefa8c0f44e755dbf77a21184084537c11828d97f0
parent98c622d2d4b29583e317c8993923a317e22b1ace (diff)
Fix test case "test_heketi_node_add_with_valid_cluster"
Fix consists of - - Remove steps for add device validation. - Add steps for gluster peer status validation. - Add steps for endpoints validation. Change-Id: I773a883b1ea8bb2bc93660db3459fd864fcd1317 Signed-off-by: vamahaja <vamahaja@redhat.com>
-rw-r--r--openshift-storage-libs/openshiftstoragelibs/heketi_ops.py39
-rw-r--r--openshift-storage-libs/openshiftstoragelibs/openshift_ops.py30
-rw-r--r--tests/functional/heketi/test_heketi_node_operations.py132
3 files changed, 162 insertions, 39 deletions
diff --git a/openshift-storage-libs/openshiftstoragelibs/heketi_ops.py b/openshift-storage-libs/openshiftstoragelibs/heketi_ops.py
index c28c98d..7cb0478 100644
--- a/openshift-storage-libs/openshiftstoragelibs/heketi_ops.py
+++ b/openshift-storage-libs/openshiftstoragelibs/heketi_ops.py
@@ -1739,3 +1739,42 @@ def heketi_db_check(heketi_client_node, heketi_server_url, **kwargs):
cmd = TIMEOUT_PREFIX + cmd
out = heketi_cmd_run(heketi_client_node, cmd)
return json.loads(out)
+
+
+def heketi_volume_endpoint_patch(
+ heketi_client_node, heketi_server_url, volume_id, **kwargs):
+ """Execute heketi volume endpoint patch command.
+
+ Args:
+ heketi_client_node (str): Node on which cmd has to be executed.
+ heketi_server_url (str): Heketi server url
+ volume_id (str): Volume ID
+
+ Kwargs:
+ The keys, values in kwargs are:
+ - secret : (str)|None
+ - user : (str)|None
+
+ Returns:
+ dict: endpoint info on success
+
+ Raises:
+ exceptions.AssertionError: if command fails.
+ """
+ version = heketi_version.get_heketi_version(heketi_client_node)
+ if version < '9.0.0-1':
+ msg = (
+ "heketi-client package %s does not support endpoint patch "
+ "functionality" % version.v_str)
+ g.log.error(msg)
+ raise NotImplementedError(msg)
+
+ heketi_server_url, _, admin_key, user = _set_heketi_global_flags(
+ heketi_server_url, **kwargs)
+
+ cmd = "heketi-cli -s %s volume endpoint patch %s %s %s" % (
+ heketi_server_url, volume_id, admin_key, user)
+ cmd = TIMEOUT_PREFIX + cmd
+ out = heketi_cmd_run(heketi_client_node, cmd)
+
+ return json.loads(out)
diff --git a/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py b/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py
index bee7af4..87dd634 100644
--- a/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py
+++ b/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py
@@ -1842,3 +1842,33 @@ def oc_create_service_monitor(hostname, sm_name="heketi",
})
oc_create(hostname, sm_data, 'stdin')
return sm_name
+
+
+def oc_patch(ocp_node, rtype, rname, changes, raise_on_error=True):
+ """Patch openshift resource with change
+
+ Args:
+ ocp_node (str): Node on which the ocp command will run.
+ rtype (str): Name of the resource type (pod, storageClass, etc).
+ rname (str): Name of the resource to fetch.
+ changes (dict): Changes to be applied through patch.
+        raise_on_error (bool): If set to true a failure to patch
+            resource with changes will raise an error, otherwise
+            None will be returned.
+ Returns:
+ str : output of oc patch command
+ Raises:
+ exceptions.ExecutionError: Raise when invalid json is provided.
+ AssertionError: Raised when unable to patch resource and
+ 'raise_on_error' is true.
+ """
+ try:
+ changes = json.dumps(changes)
+ except TypeError:
+ raise exceptions.ExecutionError(
+ "Json %s is not serializable to string")
+
+ cmd = ['oc', 'patch', rtype, rname, '-p', '\'%s\'' % changes]
+ out = command.cmd_run(
+ cmd, hostname=ocp_node, raise_on_error=raise_on_error)
+ return out or None
diff --git a/tests/functional/heketi/test_heketi_node_operations.py b/tests/functional/heketi/test_heketi_node_operations.py
index 1ceb67f..6b06a2d 100644
--- a/tests/functional/heketi/test_heketi_node_operations.py
+++ b/tests/functional/heketi/test_heketi_node_operations.py
@@ -2,7 +2,10 @@ from glusto.core import Glusto as g
from glustolibs.gluster import peer_ops
from openshiftstoragelibs import baseclass
+from openshiftstoragelibs import exceptions
from openshiftstoragelibs import heketi_ops
+from openshiftstoragelibs import openshift_ops
+from openshiftstoragelibs import openshift_storage_version
from openshiftstoragelibs import podcmd
@@ -108,34 +111,32 @@ class TestHeketiNodeOperations(baseclass.BaseClass):
h_client, h_server, vol_size, json=True)
heketi_ops.heketi_volume_delete(h_client, h_server, vol_info['id'])
- def test_heketi_node_add_with_valid_cluster(self):
- """Test heketi node add operation with valid cluster id"""
+ def add_heketi_node_to_cluster(self, cluster_id):
+ """Add new node to a cluster"""
storage_host_info = g.config.get("additional_gluster_servers")
if not storage_host_info:
self.skipTest(
"Skip test case as 'additional_gluster_servers' option is "
"not provided in config file")
- h_client, h_server = self.heketi_client_node, self.heketi_server_url
-
storage_host_info = list(storage_host_info.values())[0]
- storage_host_manage = storage_host_info["manage"]
- storage_host_name = storage_host_info["storage"]
- storage_device = storage_host_info["additional_devices"][0]
- storage_zone = 1
+ try:
+ storage_hostname = storage_host_info["manage"]
+ storage_ip = storage_host_info["storage"]
+ except KeyError:
+ msg = ("Config options 'additional_gluster_servers.manage' "
+ "and 'additional_gluster_servers.storage' must be set.")
+ g.log.error(msg)
+ raise exceptions.ConfigError(msg)
- cluster_info = heketi_ops.heketi_cluster_list(
- h_client, h_server, json=True)
- cluster_id = cluster_info["clusters"][0]
+ h_client, h_server = self.heketi_client_node, self.heketi_server_url
+ storage_zone = 1
- if self.is_containerized_gluster():
- self.configure_node_to_run_gluster_pod(storage_host_manage)
- else:
- self.configure_node_to_run_gluster_node(storage_host_manage)
+ self.configure_node_to_run_gluster(storage_hostname)
heketi_node_info = heketi_ops.heketi_node_add(
h_client, h_server, storage_zone, cluster_id,
- storage_host_manage, storage_host_name, json=True)
+ storage_hostname, storage_ip, json=True)
heketi_node_id = heketi_node_info["id"]
self.addCleanup(
heketi_ops.heketi_node_delete, h_client, h_server, heketi_node_id)
@@ -148,29 +149,82 @@ class TestHeketiNodeOperations(baseclass.BaseClass):
"Node got added in unexpected cluster exp: %s, act: %s" % (
cluster_id, heketi_node_info["cluster"]))
- heketi_ops.heketi_device_add(
- h_client, h_server, storage_device, heketi_node_id)
- heketi_node_info = heketi_ops.heketi_node_info(
- h_client, h_server, heketi_node_id, json=True)
- device_id = None
- for device in heketi_node_info["devices"]:
- if device["name"] == storage_device:
- device_id = device["id"]
+ return storage_hostname, storage_ip
+
+ @podcmd.GlustoPod()
+ def test_heketi_node_add_with_valid_cluster(self):
+ """Test heketi node add operation with valid cluster id"""
+ if (openshift_storage_version.get_openshift_storage_version()
+ < "3.11.4"):
+ self.skipTest(
+ "This test case is not supported for < OCS 3.11.4 builds due "
+ "to bug BZ-1732831")
+
+ h_client, h_server = self.heketi_client_node, self.heketi_server_url
+ ocp_node = self.ocp_master_node[0]
+
+ # Get heketi endpoints before adding node
+ h_volume_ids = heketi_ops.heketi_volume_list(
+ h_client, h_server, json=True)
+ h_endpoints_before_new_node = heketi_ops.heketi_volume_endpoint_patch(
+ h_client, h_server, h_volume_ids["volumes"][0])
+
+ cluster_info = heketi_ops.heketi_cluster_list(
+ h_client, h_server, json=True)
+ storage_hostname, storage_ip = self.add_heketi_node_to_cluster(
+ cluster_info["clusters"][0])
+
+ # Get heketi nodes and validate for newly added node
+ h_node_ids = heketi_ops.heketi_node_list(h_client, h_server, json=True)
+ for h_node_id in h_node_ids:
+ node_hostname = heketi_ops.heketi_node_info(
+ h_client, h_server, h_node_id, json=True)
+ if node_hostname["hostnames"]["manage"][0] == storage_hostname:
break
- err_msg = ("Failed to add device %s on node %s" % (
- storage_device, heketi_node_id))
- self.assertTrue(device_id, err_msg)
+ node_hostname = None
+ err_msg = ("Newly added heketi node %s not found in heketi node "
+ "list %s" % (storage_hostname, h_node_ids))
+ self.assertTrue(node_hostname, err_msg)
+
+ # Check gluster peer status for newly added node
+ if self.is_containerized_gluster():
+ gluster_pods = openshift_ops.get_ocp_gluster_pod_details(ocp_node)
+ gluster_pod = [
+ gluster_pod["pod_name"]
+ for gluster_pod in gluster_pods
+ if gluster_pod["pod_hostname"] == storage_hostname][0]
+
+ gluster_peer_status = peer_ops.get_peer_status(
+ podcmd.Pod(ocp_node, gluster_pod))
+ else:
+ gluster_peer_status = peer_ops.get_peer_status(
+ storage_hostname)
+ self.assertEqual(
+ len(gluster_peer_status), len(self.gluster_servers))
+
+ err_msg = "Expected peer status is 1 and actual is %s"
+ for peer in gluster_peer_status:
+ peer_status = int(peer["connected"])
+ self.assertEqual(peer_status, 1, err_msg % peer_status)
+
+ # Get heketi endpoints after adding node
+ h_endpoints_after_new_node = heketi_ops.heketi_volume_endpoint_patch(
+ h_client, h_server, h_volume_ids["volumes"][0])
+
+        # Get openshift endpoints and patch with heketi endpoints
+ heketi_db_endpoint = openshift_ops.oc_get_custom_resource(
+ ocp_node, "dc", name=self.heketi_dc_name,
+ custom=".:spec.template.spec.volumes[*].glusterfs.endpoints")[0]
+ openshift_ops.oc_patch(
+ ocp_node, "ep", heketi_db_endpoint, h_endpoints_after_new_node)
self.addCleanup(
- heketi_ops.heketi_device_delete, h_client, h_server, device_id)
- self.addCleanup(
- heketi_ops.heketi_device_remove, h_client, h_server, device_id)
- self.addCleanup(
- heketi_ops.heketi_device_disable, h_client, h_server, device_id)
-
- cluster_info = heketi_ops.heketi_cluster_info(
- h_client, h_server, cluster_id, json=True)
- self.assertIn(
- heketi_node_info["id"], cluster_info["nodes"],
- "Newly added node %s not found in cluster %s, cluster info %s" % (
- heketi_node_info["id"], cluster_id, cluster_info))
+ openshift_ops.oc_patch, ocp_node, "ep", heketi_db_endpoint,
+ h_endpoints_before_new_node)
+ ep_addresses = openshift_ops.oc_get_custom_resource(
+ ocp_node, "ep", name=heketi_db_endpoint,
+ custom=".:subsets[*].addresses[*].ip")[0].split(",")
+
+ err_msg = "Hostname %s not present in endpoints %s" % (
+ storage_ip, ep_addresses)
+ self.assertIn(storage_ip, ep_addresses, err_msg)