 openshift-storage-libs/openshiftstoragelibs/baseclass.py     | 51 +
 openshift-storage-libs/openshiftstoragelibs/heketi_ops.py    | 49 +
 openshift-storage-libs/openshiftstoragelibs/openshift_ops.py | 64 +
 tests/functional/heketi/test_heketi_node_operations.py       | 68 +
 4 files changed, 232 insertions(+), 0 deletions(-)
diff --git a/openshift-storage-libs/openshiftstoragelibs/baseclass.py b/openshift-storage-libs/openshiftstoragelibs/baseclass.py
index 715bbc64..1e52f560 100644
--- a/openshift-storage-libs/openshiftstoragelibs/baseclass.py
+++ b/openshift-storage-libs/openshiftstoragelibs/baseclass.py
@@ -22,6 +22,10 @@ from openshiftstoragelibs.heketi_ops import (
heketi_volume_info,
heketi_volume_list,
)
+from openshiftstoragelibs.node_ops import (
+ node_add_iptables_rules,
+ node_delete_iptables_rules,
+)
from openshiftstoragelibs.openshift_ops import (
get_pod_name_from_dc,
get_pv_name_from_pvc,
@@ -32,9 +36,11 @@ from openshiftstoragelibs.openshift_ops import (
oc_delete,
oc_get_custom_resource,
oc_get_pods,
+ oc_label,
scale_dcs_pod_amount_and_wait,
switch_oc_project,
wait_for_pvcs_be_bound,
+ wait_for_pods_be_ready,
wait_for_resources_absence,
)
from openshiftstoragelibs.openshift_storage_libs import (
@@ -364,6 +370,51 @@ class BaseClass(unittest.TestCase):
return h_volume_info
+    def configure_node_to_run_gluster_node(self, storage_hostname):
+        # Make sure glusterd is active on the node; cmd_run fails the
+        # test if the command exits non-zero.
+        glusterd_status_cmd = "systemctl is-active glusterd"
+        command.cmd_run(glusterd_status_cmd, storage_hostname)
+
+        # Open the TCP ports gluster needs in the public firewalld zone.
+        ports = ("24010", "3260", "111", "22", "24007", "24008", "49152-49664")
+        add_port = " ".join(["--add-port=%s/tcp" % port for port in ports])
+        add_firewall_rule_cmd = "firewall-cmd --zone=public %s" % add_port
+        command.cmd_run(add_firewall_rule_cmd, storage_hostname)
+
+ def configure_node_to_run_gluster_pod(self, storage_hostname):
+ ports = (
+ "24010", "3260", "111", "2222", "24007", "24008", "49152:49664")
+ iptables_rule_pattern = (
+ "-p tcp -m state --state NEW -m %s --%s %s -j ACCEPT")
+ iptables_rule_chain = "OS_FIREWALL_ALLOW"
+ iptables_rules = []
+ for port in ports:
+ if ":" in port:
+ iptables_rules.append(
+ iptables_rule_pattern % ("multiport", "dports", port))
+ else:
+ iptables_rules.append(
+ iptables_rule_pattern % ("tcp", "dport", port))
+ node_add_iptables_rules(
+ storage_hostname, iptables_rule_chain, iptables_rules)
+ self.addCleanup(
+ node_delete_iptables_rules,
+ storage_hostname, iptables_rule_chain, iptables_rules)
+
+ gluster_host_label = "glusterfs=storage-host"
+ gluster_pod_label = "glusterfs=storage-pod"
+ oc_label(
+ self.ocp_client[0], "node", storage_hostname, gluster_host_label)
+ self.addCleanup(
+ wait_for_pods_be_ready,
+ self.ocp_client[0], len(self.gluster_servers),
+ selector=gluster_pod_label)
+ self.addCleanup(
+ oc_label,
+ self.ocp_client[0], "node", storage_hostname, "glusterfs-")
+
+ wait_for_pods_be_ready(
+ self.ocp_client[0], len(self.gluster_servers) + 1,
+ selector=gluster_pod_label)
+
def is_containerized_gluster(self):
cmd = ("oc get pods --no-headers -l glusterfs-node=pod "
"-o=custom-columns=:.spec.nodeName")
diff --git a/openshift-storage-libs/openshiftstoragelibs/heketi_ops.py b/openshift-storage-libs/openshiftstoragelibs/heketi_ops.py
index 98c5154a..251104d3 100644
--- a/openshift-storage-libs/openshiftstoragelibs/heketi_ops.py
+++ b/openshift-storage-libs/openshiftstoragelibs/heketi_ops.py
@@ -804,6 +804,55 @@ def heketi_device_remove(heketi_client_node, heketi_server_url, device_id,
return out
+def heketi_node_add(heketi_client_node, heketi_server_url, zone, cluster_id,
+ management_host_name, storage_host_name, **kwargs):
+ """Executes heketi node add command.
+
+ Args:
+ heketi_client_node (str): Node on which cmd has to be executed
+ heketi_server_url (str): Heketi server url
+ zone (int): The zone in which the node should reside
+ cluster_id (str): The cluster in which the node should reside
+ management_host_name (str): Management host name
+        storage_host_name (str): Storage host name
+
+ Kwargs:
+ The keys, values in kwargs are:
+ - json : (bool)
+ - secret : (str)|None
+ - user : (str)|None
+
+ Returns:
+ Str: Node info as raw CLI output if "json" arg is not provided.
+ Dict: Node info parsed to dict if "json" arg is provided.
+
+ Raises:
+ exceptions.AssertionError: if command fails.
+
+ Example:
+ heketi_node_add(
+ heketi_client_node, heketi_server_url,
+ zone, cluster_id, management_host_name, storage_host_name)
+ """
+
+ heketi_server_url, json_arg, admin_key, user = _set_heketi_global_flags(
+ heketi_server_url, **kwargs)
+
+ cmd = (
+ "heketi-cli -s %s node add %s %s %s "
+ "--zone=%d --cluster=%s --management-host-name=%s "
+ "--storage-host-name=%s" % (
+ heketi_server_url, json_arg, admin_key, user,
+ zone, cluster_id, management_host_name, storage_host_name))
+
+ cmd = TIMEOUT_PREFIX + cmd
+ out = heketi_cmd_run(heketi_client_node, cmd)
+ if json_arg:
+ return json.loads(out)
+ else:
+ return out
+
+
def heketi_node_delete(heketi_client_node, heketi_server_url, node_id,
**kwargs):
"""Executes heketi node delete command.
diff --git a/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py b/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py
index 07c90d7a..9acec4ef 100644
--- a/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py
+++ b/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py
@@ -570,6 +570,22 @@ def oc_get_all_pvs(ocp_node):
return oc_get_yaml(ocp_node, 'pv', None)
+def oc_label(hostname, rtype, rname, label):
+    """Add a label to the given resource.
+
+    Args:
+        hostname (str): Node on which the oc command runs.
+        rtype (str): Type of resource.
+        rname (str): Name of resource.
+        label (str): Label to be applied, e.g. "glusterfs=storage-host".
+
+    Returns:
+        str: Output of the "oc label" command.
+
+    Raises:
+        AssertionError: In case adding the label to the resource fails.
+    """
+    cmd = "oc label %s %s %s" % (rtype, rname, label)
+    out = command.cmd_run(cmd, hostname=hostname)
+
+    return out
+
+
def wait_for_resources_absence(ocp_node, rtype, names,
interval=5, timeout=600):
"""Wait for an absence of any set of resources of one type.
@@ -961,6 +977,54 @@ def wait_for_pod_be_ready(hostname, pod_name,
raise exceptions.ExecutionError(err_msg)
+def wait_for_pods_be_ready(
+ hostname, pod_count, selector, timeout=600, wait_step=10):
+    """Wait for 'pod_count' pods matching 'selector' to be in Ready state.
+
+ Args:
+ hostname (str): Node where we want to run our commands.
+        pod_count (int): Number of pods to wait for.
+        selector (str): Label selector used to pick the pods.
+        timeout (int): Seconds to wait for the pods to become Ready.
+ wait_step (int): Interval in seconds to wait before checking
+ status again.
+
+ Raises:
+ AssertionError: In case it fails to get pods.
+        ExecutionError: In case pods do not reach Ready state within
+            the given time.
+ """
+ if not selector:
+ raise exceptions.ExecutionError(
+ "selector parameter should be provided")
+
+ custom = (
+ r':.metadata.name,":.status.conditions[?(@.type==\"Ready\")]".status')
+ pod_status = None
+ for w in waiter.Waiter(timeout, wait_step):
+ pod_status = oc_get_custom_resource(
+ hostname, "pod", custom, selector=selector)
+
+ if not pod_status:
+ raise exceptions.ExecutionError(
+ "Unable to find pod with selector %s" % selector)
+ status = [status for _, status in pod_status]
+ if len(status) == pod_count == status.count("True"):
+ return
+ try:
+ pod_events = ""
+ for pod_name, _ in pod_status:
+ pod_events += (
+ "\n" + get_events(hostname, obj_name=pod_name, obj_type="Pod"))
+ except Exception:
+ pod_events = "?"
+
+ err_msg = (
+        "Failed to wait %s sec for pods to be in Ready state.\n"
+ "Events info: %s" % (timeout, pod_events))
+ g.log.error(err_msg)
+ raise exceptions.ExecutionError(err_msg)
+
+
def get_pod_names_from_dc(hostname, dc_name, timeout=180, wait_step=3):
"""Return list of POD names by their DC.
diff --git a/tests/functional/heketi/test_heketi_node_operations.py b/tests/functional/heketi/test_heketi_node_operations.py
index 6386be6f..1ceb67f6 100644
--- a/tests/functional/heketi/test_heketi_node_operations.py
+++ b/tests/functional/heketi/test_heketi_node_operations.py
@@ -1,3 +1,4 @@
+from glusto.core import Glusto as g
from glustolibs.gluster import peer_ops
from openshiftstoragelibs import baseclass
@@ -106,3 +107,70 @@ class TestHeketiNodeOperations(baseclass.BaseClass):
vol_info = heketi_ops.heketi_volume_create(
h_client, h_server, vol_size, json=True)
heketi_ops.heketi_volume_delete(h_client, h_server, vol_info['id'])
+
+ def test_heketi_node_add_with_valid_cluster(self):
+ """Test heketi node add operation with valid cluster id"""
+ storage_host_info = g.config.get("additional_gluster_servers")
+ if not storage_host_info:
+ self.skipTest(
+ "Skip test case as 'additional_gluster_servers' option is "
+ "not provided in config file")
+
+ h_client, h_server = self.heketi_client_node, self.heketi_server_url
+
+ storage_host_info = list(storage_host_info.values())[0]
+ storage_host_manage = storage_host_info["manage"]
+ storage_host_name = storage_host_info["storage"]
+ storage_device = storage_host_info["additional_devices"][0]
+ storage_zone = 1
+
+ cluster_info = heketi_ops.heketi_cluster_list(
+ h_client, h_server, json=True)
+ cluster_id = cluster_info["clusters"][0]
+
+ if self.is_containerized_gluster():
+ self.configure_node_to_run_gluster_pod(storage_host_manage)
+ else:
+ self.configure_node_to_run_gluster_node(storage_host_manage)
+
+ heketi_node_info = heketi_ops.heketi_node_add(
+ h_client, h_server, storage_zone, cluster_id,
+ storage_host_manage, storage_host_name, json=True)
+ heketi_node_id = heketi_node_info["id"]
+ self.addCleanup(
+ heketi_ops.heketi_node_delete, h_client, h_server, heketi_node_id)
+ self.addCleanup(
+ heketi_ops.heketi_node_remove, h_client, h_server, heketi_node_id)
+ self.addCleanup(
+ heketi_ops.heketi_node_disable, h_client, h_server, heketi_node_id)
+        self.assertEqual(
+            heketi_node_info["cluster"], cluster_id,
+            "Node got added to unexpected cluster. Expected %s, got %s" % (
+                cluster_id, heketi_node_info["cluster"]))
+
+ heketi_ops.heketi_device_add(
+ h_client, h_server, storage_device, heketi_node_id)
+ heketi_node_info = heketi_ops.heketi_node_info(
+ h_client, h_server, heketi_node_id, json=True)
+ device_id = None
+ for device in heketi_node_info["devices"]:
+ if device["name"] == storage_device:
+ device_id = device["id"]
+ break
+        err_msg = ("Failed to add device %s to node %s" % (
+            storage_device, heketi_node_id))
+ self.assertTrue(device_id, err_msg)
+
+ self.addCleanup(
+ heketi_ops.heketi_device_delete, h_client, h_server, device_id)
+ self.addCleanup(
+ heketi_ops.heketi_device_remove, h_client, h_server, device_id)
+ self.addCleanup(
+ heketi_ops.heketi_device_disable, h_client, h_server, device_id)
+
+ cluster_info = heketi_ops.heketi_cluster_info(
+ h_client, h_server, cluster_id, json=True)
+ self.assertIn(
+ heketi_node_info["id"], cluster_info["nodes"],
+ "Newly added node %s not found in cluster %s, cluster info %s" % (
+ heketi_node_info["id"], cluster_id, cluster_info))
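
The test is driven by an "additional_gluster_servers" section in the glusto config; a hypothetical minimal shape, written as the dict g.config would yield (keys inferred from the test above, values illustrative):

    # Hypothetical config fragment; real glusto configs are YAML files.
    g.config["additional_gluster_servers"] = {
        "gluster_server4": {
            "manage": "node4.example.com",       # management host name
            "storage": "10.70.46.10",            # storage network address
            "additional_devices": ["/dev/sdd"],  # raw devices for heketi
        },
    }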