author      Valerii Ponomarov <vponomar@redhat.com>  2019-03-07 20:30:44 +0530
committer   vponomar <vponomar@redhat.com>           2019-03-18 11:34:37 +0000
commit      32b611b2a6498b1de307142e335e09d1e0ec082c (patch)
tree        aaf600ab6e6adabab7c3facbf30ae6f056731969 /openshift-storage-libs
parent      0fcdb081517c5904969b89b20326d21b361e448e (diff)
Reorder lib files removing redundant dir layer
Move all the files from the 'cns-libs/cnslibs/common' dir to 'openshift-storage-libs/openshiftstoragelibs', because 'common' is the only dir there, which doesn't really make sense. And "cns" is the old project name, so replace it with "openshift-storage-libs". Also, fix all the imports of these libs. Change-Id: Ife00a73554e73b21b214b15016b0c8dbbf423446
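As a result, the imports change, illustratively, from 'from cnslibs.common import heketi_ops' to 'from openshiftstoragelibs import heketi_ops'.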
Diffstat (limited to 'openshift-storage-libs')
-rw-r--r--  openshift-storage-libs/MANIFEST.in                                       1
-rw-r--r--  openshift-storage-libs/openshiftstoragelibs/__init__.py                  0
-rw-r--r--  openshift-storage-libs/openshiftstoragelibs/baseclass.py               318
-rw-r--r--  openshift-storage-libs/openshiftstoragelibs/command.py                  23
-rw-r--r--  openshift-storage-libs/openshiftstoragelibs/exceptions.py               23
-rw-r--r--  openshift-storage-libs/openshiftstoragelibs/gluster_ops.py             260
-rw-r--r--  openshift-storage-libs/openshiftstoragelibs/heketi_ops.py             1516
-rw-r--r--  openshift-storage-libs/openshiftstoragelibs/heketi_version.py          246
-rw-r--r--  openshift-storage-libs/openshiftstoragelibs/naming.py                   56
-rw-r--r--  openshift-storage-libs/openshiftstoragelibs/openshift_ops.py          1507
-rw-r--r--  openshift-storage-libs/openshiftstoragelibs/openshift_storage_libs.py  228
-rw-r--r--  openshift-storage-libs/openshiftstoragelibs/openshift_version.py       173
-rw-r--r--  openshift-storage-libs/openshiftstoragelibs/podcmd.py                  141
-rw-r--r--  openshift-storage-libs/openshiftstoragelibs/utils.py                    40
-rw-r--r--  openshift-storage-libs/openshiftstoragelibs/waiter.py                   38
-rw-r--r--  openshift-storage-libs/setup.py                                         30
16 files changed, 4600 insertions, 0 deletions
diff --git a/openshift-storage-libs/MANIFEST.in b/openshift-storage-libs/MANIFEST.in
new file mode 100644
index 00000000..121de5bf
--- /dev/null
+++ b/openshift-storage-libs/MANIFEST.in
@@ -0,0 +1 @@
+recursive-include openshiftstoragelibs *.yaml *.json *.txt
diff --git a/openshift-storage-libs/openshiftstoragelibs/__init__.py b/openshift-storage-libs/openshiftstoragelibs/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/openshift-storage-libs/openshiftstoragelibs/__init__.py
diff --git a/openshift-storage-libs/openshiftstoragelibs/baseclass.py b/openshift-storage-libs/openshiftstoragelibs/baseclass.py
new file mode 100644
index 00000000..366af6a9
--- /dev/null
+++ b/openshift-storage-libs/openshiftstoragelibs/baseclass.py
@@ -0,0 +1,318 @@
+import datetime
+import unittest
+
+from glusto.core import Glusto as g
+
+from openshiftstoragelibs import command
+from openshiftstoragelibs.exceptions import (
+ ConfigError,
+ ExecutionError,
+)
+from openshiftstoragelibs.heketi_ops import (
+ hello_heketi,
+ heketi_blockvolume_delete,
+ heketi_volume_delete,
+)
+from openshiftstoragelibs.openshift_ops import (
+ get_pod_name_from_dc,
+ get_pv_name_from_pvc,
+ oc_create_app_dc_with_io,
+ oc_create_pvc,
+ oc_create_sc,
+ oc_create_secret,
+ oc_delete,
+ oc_get_custom_resource,
+ scale_dc_pod_amount_and_wait,
+ switch_oc_project,
+ verify_pvc_status_is_bound,
+ wait_for_pod_be_ready,
+ wait_for_resource_absence,
+)
+
+
+class BaseClass(unittest.TestCase):
+ """Base class for test classes."""
+ ERROR_OR_FAILURE_EXISTS = False
+ STOP_ON_FIRST_FAILURE = bool(g.config.get("common", {}).get(
+ "stop_on_first_failure", False))
+
+ @classmethod
+ def setUpClass(cls):
+ """Initialize all the variables necessary for test cases."""
+ super(BaseClass, cls).setUpClass()
+
+ # Initializes OCP config variables
+ cls.ocp_servers_info = g.config['ocp_servers']
+        # Wrap 'keys()' results into 'list' calls for py3 compatibility,
+        # where dict views are not indexable.
+        cls.ocp_master_node = list(g.config['ocp_servers']['master'].keys())
+        cls.ocp_master_node_info = g.config['ocp_servers']['master']
+        cls.ocp_client = list(g.config['ocp_servers']['client'].keys())
+        cls.ocp_client_info = g.config['ocp_servers']['client']
+        cls.ocp_nodes = list(g.config['ocp_servers']['nodes'].keys())
+ cls.ocp_nodes_info = g.config['ocp_servers']['nodes']
+
+ # Initializes storage project config variables
+ openshift_config = g.config.get("cns", g.config.get("openshift"))
+ cls.storage_project_name = openshift_config.get(
+ 'storage_project_name',
+ openshift_config.get('setup', {}).get('cns_project_name'))
+
+ # Initializes heketi config variables
+ heketi_config = openshift_config['heketi_config']
+ cls.heketi_dc_name = heketi_config['heketi_dc_name']
+ cls.heketi_service_name = heketi_config['heketi_service_name']
+ cls.heketi_client_node = heketi_config['heketi_client_node']
+ cls.heketi_server_url = heketi_config['heketi_server_url']
+ cls.heketi_cli_user = heketi_config['heketi_cli_user']
+ cls.heketi_cli_key = heketi_config['heketi_cli_key']
+
+        cls.gluster_servers = list(g.config['gluster_servers'].keys())
+ cls.gluster_servers_info = g.config['gluster_servers']
+
+ cls.storage_classes = openshift_config['dynamic_provisioning'][
+ 'storage_classes']
+ cls.sc = cls.storage_classes.get(
+ 'storage_class1', cls.storage_classes.get('file_storage_class'))
+ cmd = "echo -n %s | base64" % cls.heketi_cli_key
+ ret, out, err = g.run(cls.ocp_master_node[0], cmd, "root")
+ if ret != 0:
+            raise ExecutionError(
+                "Failed to execute cmd '%s' on '%s' node. Out: %s, "
+                "Err: %s" % (cmd, cls.ocp_master_node[0], out, err))
+ cls.secret_data_key = out.strip()
+
+ # Checks if heketi server is alive
+ if not hello_heketi(cls.heketi_client_node, cls.heketi_server_url):
+ raise ConfigError("Heketi server %s is not alive"
+ % cls.heketi_server_url)
+
+ # Switch to the storage project
+ if not switch_oc_project(
+ cls.ocp_master_node[0], cls.storage_project_name):
+ raise ExecutionError("Failed to switch oc project on node %s"
+ % cls.ocp_master_node[0])
+
+ if 'glustotest_run_id' not in g.config:
+ g.config['glustotest_run_id'] = (
+ datetime.datetime.now().strftime('%H_%M_%d_%m_%Y'))
+ cls.glustotest_run_id = g.config['glustotest_run_id']
+ msg = "Setupclass: %s : %s" % (cls.__name__, cls.glustotest_run_id)
+ g.log.info(msg)
+
+ def setUp(self):
+ if (BaseClass.STOP_ON_FIRST_FAILURE and
+ BaseClass.ERROR_OR_FAILURE_EXISTS):
+            self.skipTest("Test skipped due to the 'stop_on_first_failure' "
+                          "restriction, because an earlier test case failed.")
+
+ super(BaseClass, self).setUp()
+
+ msg = "Starting Test : %s : %s" % (self.id(), self.glustotest_run_id)
+ g.log.info(msg)
+
+ def tearDown(self):
+ super(BaseClass, self).tearDown()
+ msg = "Ending Test: %s : %s" % (self.id(), self.glustotest_run_id)
+ g.log.info(msg)
+
+ @classmethod
+ def tearDownClass(cls):
+ super(BaseClass, cls).tearDownClass()
+ msg = "Teardownclass: %s : %s" % (cls.__name__, cls.glustotest_run_id)
+ g.log.info(msg)
+
+ def cmd_run(self, cmd, hostname=None, raise_on_error=True):
+ if not hostname:
+ hostname = self.ocp_master_node[0]
+ return command.cmd_run(
+ cmd=cmd, hostname=hostname, raise_on_error=raise_on_error)
+
+ def create_secret(self, secret_name_prefix="autotests-secret-"):
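+        """Create a secret matching the configured storage class and
+        schedule its deletion on test cleanup."""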
+ secret_name = oc_create_secret(
+ self.ocp_client[0],
+ secret_name_prefix=secret_name_prefix,
+ namespace=(self.sc.get(
+ 'secretnamespace',
+ self.sc.get('restsecretnamespace', 'default'))),
+ data_key=self.heketi_cli_key,
+ secret_type=self.sc.get('provisioner', 'kubernetes.io/glusterfs'))
+ self.addCleanup(
+ oc_delete, self.ocp_client[0], 'secret', secret_name)
+ return secret_name
+
+ def create_storage_class(self, secret_name=None,
+ sc_name_prefix="autotests-sc",
+ create_vol_name_prefix=False,
+ allow_volume_expansion=False,
+ reclaim_policy="Delete",
+ set_hacount=None,
+ is_arbiter_vol=False, arbiter_avg_file_size=None):
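+        """Create a storage class for the configured provisioner and
+        schedule its deletion on test cleanup."""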
+
+ # Create secret if one is not specified
+ if not secret_name:
+ secret_name = self.create_secret()
+
+ # Create storage class
+ secret_name_option = "secretname"
+ secret_namespace_option = "secretnamespace"
+ provisioner = self.sc.get("provisioner", "kubernetes.io/glusterfs")
+ if provisioner != "kubernetes.io/glusterfs":
+ secret_name_option = "rest%s" % secret_name_option
+ secret_namespace_option = "rest%s" % secret_namespace_option
+ parameters = {
+ "resturl": self.sc["resturl"],
+ "restuser": self.sc["restuser"],
+ secret_name_option: secret_name,
+ secret_namespace_option: self.sc.get(
+ "secretnamespace", self.sc.get("restsecretnamespace")),
+ }
+ if set_hacount:
+ parameters["hacount"] = self.sc.get("hacount", "3")
+ if is_arbiter_vol:
+ parameters["volumeoptions"] = "user.heketi.arbiter true"
+ if arbiter_avg_file_size:
+ parameters["volumeoptions"] += (
+ ",user.heketi.average-file-size %s" % (
+ arbiter_avg_file_size))
+ if create_vol_name_prefix:
+ parameters["volumenameprefix"] = self.sc.get(
+ "volumenameprefix", "autotest")
+ self.sc_name = oc_create_sc(
+ self.ocp_client[0],
+ sc_name_prefix=sc_name_prefix,
+ provisioner=provisioner,
+ allow_volume_expansion=allow_volume_expansion,
+ reclaim_policy=reclaim_policy,
+ **parameters)
+ self.addCleanup(oc_delete, self.ocp_client[0], "sc", self.sc_name)
+ return self.sc_name
+
+ def create_and_wait_for_pvcs(self, pvc_size=1,
+ pvc_name_prefix="autotests-pvc",
+ pvc_amount=1, sc_name=None,
+ timeout=120, wait_step=3):
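+        """Create PVCs, wait for them to be bound and schedule removal of
+        the PVCs (and of the backing PVs and Heketi volumes when the
+        reclaim policy is 'Retain') on test cleanup."""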
+ node = self.ocp_client[0]
+
+ # Create storage class if not specified
+ if not sc_name:
+ if getattr(self, "sc_name", ""):
+ sc_name = self.sc_name
+ else:
+ sc_name = self.create_storage_class()
+
+ # Create PVCs
+ pvc_names = []
+ for i in range(pvc_amount):
+ pvc_name = oc_create_pvc(
+ node, sc_name, pvc_name_prefix=pvc_name_prefix,
+ pvc_size=pvc_size)
+ pvc_names.append(pvc_name)
+ self.addCleanup(
+ wait_for_resource_absence, node, 'pvc', pvc_name)
+
+ # Wait for PVCs to be in bound state
+ try:
+ for pvc_name in pvc_names:
+ verify_pvc_status_is_bound(node, pvc_name, timeout, wait_step)
+ finally:
+ reclaim_policy = oc_get_custom_resource(
+ node, 'sc', ':.reclaimPolicy', sc_name)[0]
+
+ for pvc_name in pvc_names:
+ if reclaim_policy == 'Retain':
+ pv_name = get_pv_name_from_pvc(node, pvc_name)
+ self.addCleanup(oc_delete, node, 'pv', pv_name,
+ raise_on_absence=False)
+ custom = (r':.metadata.annotations."gluster\.kubernetes'
+ r'\.io\/heketi\-volume\-id"')
+ vol_id = oc_get_custom_resource(
+ node, 'pv', custom, pv_name)[0]
+ if self.sc.get('provisioner') == "kubernetes.io/glusterfs":
+ self.addCleanup(heketi_volume_delete,
+ self.heketi_client_node,
+ self.heketi_server_url, vol_id,
+ raise_on_error=False)
+ else:
+ self.addCleanup(heketi_blockvolume_delete,
+ self.heketi_client_node,
+ self.heketi_server_url, vol_id,
+ raise_on_error=False)
+ self.addCleanup(oc_delete, node, 'pvc', pvc_name,
+ raise_on_absence=False)
+
+ return pvc_names
+
+ def create_and_wait_for_pvc(self, pvc_size=1,
+ pvc_name_prefix='autotests-pvc', sc_name=None):
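+        """Create a single PVC and wait for it to be bound."""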
+ self.pvc_name = self.create_and_wait_for_pvcs(
+ pvc_size=pvc_size, pvc_name_prefix=pvc_name_prefix, sc_name=sc_name
+ )[0]
+ return self.pvc_name
+
+ def create_dc_with_pvc(self, pvc_name, timeout=300, wait_step=10):
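+        """Create a DC with an app POD performing I/O on the PVC-backed
+        volume and wait for that POD to be ready."""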
+ dc_name = oc_create_app_dc_with_io(self.ocp_client[0], pvc_name)
+ self.addCleanup(oc_delete, self.ocp_client[0], 'dc', dc_name)
+ self.addCleanup(
+ scale_dc_pod_amount_and_wait, self.ocp_client[0], dc_name, 0)
+ pod_name = get_pod_name_from_dc(self.ocp_client[0], dc_name)
+ wait_for_pod_be_ready(self.ocp_client[0], pod_name,
+ timeout=timeout, wait_step=wait_step)
+ return dc_name, pod_name
+
+ def _is_error_or_failure_exists(self):
+ if hasattr(self, '_outcome'):
+ # Python 3.4+
+ result = self.defaultTestResult()
+ self._feedErrorsToResult(result, self._outcome.errors)
+ else:
+ # Python 2.7-3.3
+ result = getattr(
+ self, '_outcomeForDoCleanups', self._resultForDoCleanups)
+ ok_result = True
+ for attr in ('errors', 'failures'):
+ if not hasattr(result, attr):
+ continue
+ exc_list = getattr(result, attr)
+ if exc_list and exc_list[-1][0] is self:
+ ok_result = ok_result and not exc_list[-1][1]
+ if hasattr(result, '_excinfo'):
+ ok_result = ok_result and not result._excinfo
+ if ok_result:
+ return False
+ self.ERROR_OR_FAILURE_EXISTS = True
+ BaseClass.ERROR_OR_FAILURE_EXISTS = True
+ return True
+
+ def doCleanups(self):
+ if (BaseClass.STOP_ON_FIRST_FAILURE and (
+ self.ERROR_OR_FAILURE_EXISTS or
+ self._is_error_or_failure_exists())):
+ while self._cleanups:
+ (func, args, kwargs) = self._cleanups.pop()
+ msg = ("Found test case failure. Avoiding run of scheduled "
+ "following cleanup:\nfunc = %s\nargs = %s\n"
+ "kwargs = %s" % (func, args, kwargs))
+ g.log.warn(msg)
+ return super(BaseClass, self).doCleanups()
+
+ @classmethod
+ def doClassCleanups(cls):
+ if (BaseClass.STOP_ON_FIRST_FAILURE and
+ BaseClass.ERROR_OR_FAILURE_EXISTS):
+ while cls._class_cleanups:
+ (func, args, kwargs) = cls._class_cleanups.pop()
+ msg = ("Found test case failure. Avoiding run of scheduled "
+ "following cleanup:\nfunc = %s\nargs = %s\n"
+ "kwargs = %s" % (func, args, kwargs))
+ g.log.warn(msg)
+ return super(BaseClass, cls).doClassCleanups()
+
+
+class GlusterBlockBaseClass(BaseClass):
+ """Base class for gluster-block test cases."""
+
+ @classmethod
+ def setUpClass(cls):
+ """Initialize all the variables necessary for test cases."""
+ super(GlusterBlockBaseClass, cls).setUpClass()
+ cls.sc = cls.storage_classes.get(
+ 'storage_class2', cls.storage_classes.get('block_storage_class'))
diff --git a/openshift-storage-libs/openshiftstoragelibs/command.py b/openshift-storage-libs/openshiftstoragelibs/command.py
new file mode 100644
index 00000000..06912915
--- /dev/null
+++ b/openshift-storage-libs/openshiftstoragelibs/command.py
@@ -0,0 +1,23 @@
+from glusto.core import Glusto as g
+
+
+def cmd_run(cmd, hostname, raise_on_error=True):
+ """Glusto's command runner wrapper.
+
+ Args:
+ cmd (str): Shell command to run on the specified hostname.
+ hostname (str): hostname where Glusto should run specified command.
+        raise_on_error (bool): whether to raise an exception if the
+            command execution fails.
+ Returns:
+        str: stripped stdout value of the shell command, if any.
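+
+    Example:
+        # Illustrative usage; 'master-node' is a hypothetical hostname:
+        out = cmd_run("oc get nodes", "master-node")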
+ """
+ ret, out, err = g.run(hostname, cmd, "root")
+ if raise_on_error:
+ msg = ("Failed to execute command '%s' on '%s' node. Got non-zero "
+ "return code '%s'. Err: %s" % (cmd, hostname, ret, err))
+ assert int(ret) == 0, msg
+
+ out = out.strip() if out else out
+
+ return out
diff --git a/openshift-storage-libs/openshiftstoragelibs/exceptions.py b/openshift-storage-libs/openshiftstoragelibs/exceptions.py
new file mode 100644
index 00000000..44daee12
--- /dev/null
+++ b/openshift-storage-libs/openshiftstoragelibs/exceptions.py
@@ -0,0 +1,23 @@
+class ConfigError(Exception):
+ '''
+ Custom exception thrown when there is an unrecoverable configuration error.
+ For example, a required configuration key is not found.
+ '''
+
+
+class ExecutionError(Exception):
+ '''
+ Custom exception thrown when a command executed by Glusto results in an
+ unrecoverable error.
+
+ For example, all hosts are not in peer state or a volume cannot be setup.
+ '''
+
+
+class NotSupportedException(Exception):
+ '''
+    Custom exception thrown when a particular feature is not supported in
+    a particular product version.
+
+ For example, pv resize is not supported in OCP version < 3.9
+ '''
diff --git a/openshift-storage-libs/openshiftstoragelibs/gluster_ops.py b/openshift-storage-libs/openshiftstoragelibs/gluster_ops.py
new file mode 100644
index 00000000..8ac95d82
--- /dev/null
+++ b/openshift-storage-libs/openshiftstoragelibs/gluster_ops.py
@@ -0,0 +1,260 @@
+import time
+import json
+import re
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.block_ops import block_list
+from glustolibs.gluster.heal_libs import is_heal_complete
+from glustolibs.gluster.volume_ops import (
+ get_volume_status,
+ get_volume_list,
+ volume_status,
+ volume_start,
+ volume_stop,
+)
+
+from openshiftstoragelibs import exceptions
+from openshiftstoragelibs.heketi_ops import heketi_blockvolume_info
+from openshiftstoragelibs.openshift_ops import cmd_run_on_gluster_pod_or_node
+from openshiftstoragelibs import podcmd
+from openshiftstoragelibs import waiter
+
+
+@podcmd.GlustoPod()
+def wait_to_heal_complete(timeout=300, wait_step=5):
+ """Monitors heal for volumes on gluster"""
+ gluster_vol_list = get_volume_list("auto_get_gluster_endpoint")
+ if not gluster_vol_list:
+ raise AssertionError("failed to get gluster volume list")
+
+ _waiter = waiter.Waiter(timeout=timeout, interval=wait_step)
+ for gluster_vol in gluster_vol_list:
+ for w in _waiter:
+ if is_heal_complete("auto_get_gluster_endpoint", gluster_vol):
+ break
+
+ if w.expired:
+ err_msg = ("reached timeout waiting for all the gluster volumes "
+ "to reach the 'healed' state.")
+ g.log.error(err_msg)
+ raise AssertionError(err_msg)
+
+
+@podcmd.GlustoPod()
+def get_gluster_vol_status(file_vol):
+ """Get Gluster vol hosting nodes.
+
+ Args:
+ file_vol (str): file volume name.
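+
+    Returns:
+        dict: nested dict with the volume status data as returned by
+            glustolibs' get_volume_status function.
+
+    Example:
+        # Illustrative; 'file-vol-1' is a hypothetical volume name:
+        vol_status = get_gluster_vol_status("file-vol-1")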
+ """
+ # Get Gluster vol info
+ gluster_volume_status = get_volume_status(
+ "auto_get_gluster_endpoint", file_vol)
+ if not gluster_volume_status:
+ raise AssertionError("Failed to get volume status for gluster "
+ "volume '%s'" % file_vol)
+ if file_vol in gluster_volume_status:
+ gluster_volume_status = gluster_volume_status.get(file_vol)
+ return gluster_volume_status
+
+
+@podcmd.GlustoPod()
+def get_gluster_vol_hosting_nodes(file_vol):
+ """Get Gluster vol hosting nodes.
+
+ Args:
+ file_vol (str): file volume name.
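+
+    Returns:
+        list: Gluster nodes which host brick processes of 'file_vol'.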
+ """
+ vol_status = get_gluster_vol_status(file_vol)
+ g_nodes = []
+ for g_node, g_node_data in vol_status.items():
+ for process_name, process_data in g_node_data.items():
+ if not process_name.startswith("/var"):
+ continue
+ g_nodes.append(g_node)
+ return g_nodes
+
+
+@podcmd.GlustoPod()
+def restart_gluster_vol_brick_processes(ocp_client_node, file_vol,
+ gluster_nodes):
+ """Restarts brick process of a file volume.
+
+ Args:
+ ocp_client_node (str): Node to execute OCP commands on.
+ file_vol (str): file volume name.
+ gluster_nodes (str/list): One or several IPv4 addresses of Gluster
+ nodes, where 'file_vol' brick processes must be recreated.
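+
+    Example:
+        # Illustrative; the node name and IPs below are hypothetical:
+        restart_gluster_vol_brick_processes(
+            "ocp-client-node", "file-vol-1", ["10.70.1.1", "10.70.1.2"])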
+ """
+ if not isinstance(gluster_nodes, (list, set, tuple)):
+ gluster_nodes = [gluster_nodes]
+
+ # Get Gluster vol brick PIDs
+ gluster_volume_status = get_gluster_vol_status(file_vol)
+    pids = []
+ for gluster_node in gluster_nodes:
+ pid = None
+ for g_node, g_node_data in gluster_volume_status.items():
+ if g_node != gluster_node:
+ continue
+ for process_name, process_data in g_node_data.items():
+ if not process_name.startswith("/var"):
+ continue
+ pid = process_data["pid"]
+            # When a brick is down, the PID of the brick is reported as -1,
+            # which is an unexpected situation. So, assert on it explicitly.
+            assert pid != "-1", (
+                "Got unexpected PID (-1) for '%s' gluster vol on '%s' "
+                "node." % (file_vol, gluster_node))
+ assert pid, ("Could not find 'pid' in Gluster vol data for '%s' "
+ "Gluster node. Data: %s" % (
+ gluster_node, gluster_volume_status))
+ pids.append((gluster_node, pid))
+
+ # Restart Gluster vol brick processes using found PIDs
+ for gluster_node, pid in pids:
+ cmd = "kill -9 %s" % pid
+ cmd_run_on_gluster_pod_or_node(ocp_client_node, cmd, gluster_node)
+
+ # Wait for Gluster vol brick processes to be recreated
+ for gluster_node, pid in pids:
+        killed_pid_cmd = (
+            "ps -eaf | grep %s | grep -v grep | awk '{print $2}'" % pid)
+ _waiter = waiter.Waiter(timeout=60, interval=2)
+ for w in _waiter:
+ result = cmd_run_on_gluster_pod_or_node(
+ ocp_client_node, killed_pid_cmd, gluster_node)
+ if result.strip() == pid:
+ continue
+ g.log.info("Brick process '%s' was killed successfully on '%s'" % (
+ pid, gluster_node))
+ break
+ if w.expired:
+ error_msg = ("Process ID '%s' still exists on '%s' after waiting "
+ "for it 60 seconds to get killed." % (
+ pid, gluster_node))
+ g.log.error(error_msg)
+ raise exceptions.ExecutionError(error_msg)
+
+ # Start volume after gluster vol brick processes recreation
+ ret, out, err = volume_start(
+ "auto_get_gluster_endpoint", file_vol, force=True)
+ if ret != 0:
+ err_msg = "Failed to start gluster volume %s on %s. error: %s" % (
+ file_vol, gluster_node, err)
+ g.log.error(err_msg)
+ raise AssertionError(err_msg)
+
+
+@podcmd.GlustoPod()
+def restart_file_volume(file_vol, sleep_time=120):
+ """Restars file volume service.
+
+ Args:
+        file_vol (str): name of a file volume.
+        sleep_time (int): time in seconds to wait after stopping the volume
+            and before starting it back.
+ """
+ gluster_volume_status = get_volume_status(
+ "auto_get_gluster_endpoint", file_vol)
+ if not gluster_volume_status:
+ raise AssertionError("failed to get gluster volume status")
+
+ g.log.info("Gluster volume %s status\n%s : " % (
+ file_vol, gluster_volume_status)
+ )
+
+ ret, out, err = volume_stop("auto_get_gluster_endpoint", file_vol)
+ if ret != 0:
+ err_msg = "Failed to stop gluster volume %s. error: %s" % (
+ file_vol, err)
+ g.log.error(err_msg)
+ raise AssertionError(err_msg)
+
+    # Wait explicitly to let I/O and PVC creation settle (2 mins by default)
+ time.sleep(sleep_time)
+
+ ret, out, err = volume_start(
+ "auto_get_gluster_endpoint", file_vol, force=True)
+ if ret != 0:
+ err_msg = "failed to start gluster volume %s error: %s" % (
+ file_vol, err)
+ g.log.error(err_msg)
+ raise AssertionError(err_msg)
+
+ ret, out, err = volume_status("auto_get_gluster_endpoint", file_vol)
+ if ret != 0:
+ err_msg = ("Failed to get status for gluster volume %s error: %s" % (
+ file_vol, err))
+ g.log.error(err_msg)
+ raise AssertionError(err_msg)
+
+
+@podcmd.GlustoPod()
+def match_heketi_and_gluster_block_volumes_by_prefix(
+ heketi_block_volumes, block_vol_prefix):
+ """Match block volumes from heketi and gluster. This function can't
+ be used for block volumes with custom prefixes
+
+ Args:
+ heketi_block_volumes (list): list of heketi block volumes with
+ which gluster block volumes need to
+ be matched
+ block_vol_prefix (str): block volume prefix by which the block
+                                volumes need to be filtered
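+
+    Example:
+        # Illustrative; assumes the default gluster-block volume name
+        # prefix 'blockvol_' and a sorted list of heketi block volume IDs:
+        match_heketi_and_gluster_block_volumes_by_prefix(
+            heketi_block_volume_ids, "blockvol_")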
+ """
+ gluster_vol_list = get_volume_list("auto_get_gluster_endpoint")
+
+ gluster_vol_block_list = []
+ for gluster_vol in gluster_vol_list[1:]:
+ ret, out, err = block_list("auto_get_gluster_endpoint", gluster_vol)
+ try:
+ if ret != 0 and json.loads(out)["RESULT"] == "FAIL":
+ msg = "failed to get block volume list with error: %s" % err
+ g.log.error(msg)
+ raise AssertionError(msg)
+ except Exception as e:
+ g.log.error(e)
+ raise
+
+ gluster_vol_block_list.extend([
+ block_vol.replace(block_vol_prefix, "")
+ for block_vol in json.loads(out)["blocks"]
+ if block_vol.startswith(block_vol_prefix)
+ ])
+
+    if sorted(gluster_vol_block_list) != heketi_block_volumes:
+ err_msg = "Gluster and Heketi Block volume list match failed"
+ err_msg += "\nGluster Volumes: %s, " % gluster_vol_block_list
+ err_msg += "\nBlock volumes %s" % heketi_block_volumes
+ err_msg += "\nDifference: %s" % (set(gluster_vol_block_list) ^
+ set(heketi_block_volumes))
+ raise AssertionError(err_msg)
+
+
+@podcmd.GlustoPod()
+def get_block_hosting_volume_name(heketi_client_node, heketi_server_url,
+ block_volume):
+ """Returns block hosting volume name of given block volume
+
+ Args:
+ heketi_client_node (str): Node on which cmd has to be executed.
+ heketi_server_url (str): Heketi server url
+        block_volume (str): Block volume for which the block hosting
+                            volume is returned
+ Returns:
+ str : Name of the block hosting volume for given block volume
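+
+    Example:
+        # Illustrative; the values below are hypothetical:
+        bhv_name = get_block_hosting_volume_name(
+            "heketi-client-node", "http://heketi-url:8080", "block-vol-id")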
+ """
+ block_vol_info = heketi_blockvolume_info(
+ heketi_client_node, heketi_server_url, block_volume
+ )
+
+ for line in block_vol_info.splitlines():
+ block_hosting_vol_match = re.search(
+ "^Block Hosting Volume: (.*)$", line
+ )
+
+ if not block_hosting_vol_match:
+ continue
+
+ gluster_vol_list = get_volume_list("auto_get_gluster_endpoint")
+ for vol in gluster_vol_list:
+ if block_hosting_vol_match.group(1).strip() in vol:
+ return vol
diff --git a/openshift-storage-libs/openshiftstoragelibs/heketi_ops.py b/openshift-storage-libs/openshiftstoragelibs/heketi_ops.py
new file mode 100644
index 00000000..02fefe66
--- /dev/null
+++ b/openshift-storage-libs/openshiftstoragelibs/heketi_ops.py
@@ -0,0 +1,1516 @@
+import json
+
+from glusto.core import Glusto as g
+
+from openshiftstoragelibs import exceptions
+from openshiftstoragelibs import heketi_version
+from openshiftstoragelibs.utils import parse_prometheus_data
+
+
+def _set_heketi_global_flags(heketi_server_url, **kwargs):
+ """Helper function to set heketi-cli global flags."""
+
+    heketi_server_url = (
+        heketi_server_url if heketi_server_url
+        else "http://heketi-storage-project.cloudapps.mystorage.com")
+ json = kwargs.get("json")
+ secret = kwargs.get("secret")
+ user = kwargs.get("user")
+ json_arg = "--json" if json else ""
+ secret_arg = "--secret %s" % secret if secret else ""
+ user_arg = "--user %s" % user if user else ""
+ if not user_arg:
+ openshift_config = g.config.get("cns", g.config.get("openshift"))
+ heketi_cli_user = openshift_config['heketi_config']['heketi_cli_user']
+ if heketi_cli_user:
+ user_arg = "--user %s" % heketi_cli_user
+ heketi_cli_key = openshift_config[
+ 'heketi_config']['heketi_cli_key']
+ if heketi_cli_key is not None:
+ secret_arg = "--secret '%s'" % heketi_cli_key
+
+ return (heketi_server_url, json_arg, secret_arg, user_arg)
+
+
+def heketi_volume_create(heketi_client_node, heketi_server_url, size,
+ raw_cli_output=False, **kwargs):
+ """Creates heketi volume with the given user options.
+
+ Args:
+ heketi_client_node (str): Node on which cmd has to be executed.
+ heketi_server_url (str): Heketi server url
+ size (str): Volume size
+
+ Kwargs:
+ The keys, values in kwargs are:
+ - block : (bool)
+ - clusters : (str)|None
+ - disperse_data : (int)|None
+ - durability : (str)|None
+ - gid : (int)|None
+ - gluster_volume_options : (str)|None
+ - name : (str)|None
+ - persistent_volume : (bool)
+ - persistent_volume_endpoint : (str)|None
+ - persistent_volume_file : (str)|None
+            - redundancy : (int)|None
+            - replica : (int)|None
+            - size : (int)|None
+ - snapshot-factor : (float)|None
+ - json : (bool)
+ - secret : (str)|None
+ - user : (str)|None
+
+ Returns:
+        dict: volume create info on success if the '--json' option is
+            specified; otherwise the raw string output is returned.
+ Tuple (ret, out, err): if raw_cli_output is True.
+ Raises:
+ exceptions.ExecutionError when error occurs and raw_cli_output is False
+
+ Example:
+ heketi_volume_create(heketi_client_node, heketi_server_url, size)
+ """
+
+ if not kwargs.get('user'):
+ openshift_config = g.config.get("cns", g.config.get("openshift"))
+ heketi_cli_user = openshift_config['heketi_config']['heketi_cli_user']
+ if heketi_cli_user:
+ kwargs['user'] = heketi_cli_user
+ heketi_cli_key = openshift_config[
+ 'heketi_config']['heketi_cli_key']
+ if heketi_cli_key is not None:
+ kwargs['secret'] = heketi_cli_key
+
+    heketi_server_url = (
+        heketi_server_url if heketi_server_url
+        else "http://heketi-storage-project.cloudapps.mystorage.com")
+
+ block_arg = "--block" if kwargs.get("block") else ""
+ clusters_arg = ("--clusters %s" % kwargs.get("clusters")
+ if kwargs.get("clusters") else "")
+ disperse_data_arg = ("--disperse-data %d" % kwargs.get("disperse_data")
+ if kwargs.get("disperse_data") else "")
+ durability_arg = ("--durability %s" % kwargs.get("durability")
+ if kwargs.get("durability") else "")
+ gid_arg = "--gid %d" % int(kwargs.get("gid")) if kwargs.get("gid") else ""
+ gluster_volume_options_arg = ("--gluster-volume-options '%s'"
+ % kwargs.get("gluster_volume_options")
+ if kwargs.get("gluster_volume_options")
+ else "")
+ name_arg = "--name %s" % kwargs.get("name") if kwargs.get("name") else ""
+ persistent_volume_arg = ("--persistent-volume %s"
+ % kwargs.get("persistent_volume")
+ if kwargs.get("persistent_volume") else "")
+ persistent_volume_endpoint_arg = ("--persistent-volume-endpoint %s"
+ % (kwargs.get(
+ "persistent_volume_endpoint"))
+ if (kwargs.get(
+ "persistent_volume_endpoint"))
+ else "")
+ persistent_volume_file_arg = ("--persistent-volume-file %s"
+ % kwargs.get("persistent_volume_file")
+ if kwargs.get("persistent_volume_file")
+ else "")
+ redundancy_arg = ("--redundancy %d" % int(kwargs.get("redundancy"))
+ if kwargs.get("redundancy") else "")
+ replica_arg = ("--replica %d" % int(kwargs.get("replica"))
+ if kwargs.get("replica") else "")
+ snapshot_factor_arg = ("--snapshot-factor %f"
+ % float(kwargs.get("snapshot_factor"))
+ if kwargs.get("snapshot_factor") else "")
+ json_arg = "--json" if kwargs.get("json") else ""
+ secret_arg = (
+ "--secret %s" % kwargs.get("secret") if kwargs.get("secret") else "")
+ user_arg = "--user %s" % kwargs.get("user") if kwargs.get("user") else ""
+
+ err_msg = "Failed to create volume. "
+
+ cmd = ("heketi-cli -s %s volume create --size=%s %s %s %s %s %s %s "
+ "%s %s %s %s %s %s %s %s %s %s" % (
+ heketi_server_url, str(size), block_arg, clusters_arg,
+ disperse_data_arg, durability_arg, gid_arg,
+ gluster_volume_options_arg, name_arg,
+ persistent_volume_arg, persistent_volume_endpoint_arg,
+ persistent_volume_file_arg, redundancy_arg, replica_arg,
+ snapshot_factor_arg, json_arg, secret_arg, user_arg))
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if raw_cli_output:
+ return ret, out, err
+ if ret != 0:
+ err_msg += "Out: %s \n Err: %s" % (out, err)
+ g.log.error(err_msg)
+ raise exceptions.ExecutionError(err_msg)
+ if json_arg:
+ return json.loads(out)
+ return out
+
+
+def heketi_volume_info(heketi_client_node, heketi_server_url, volume_id,
+ raw_cli_output=False, **kwargs):
+ """Executes heketi volume info command.
+
+ Args:
+ heketi_client_node (str): Node on which cmd has to be executed.
+ heketi_server_url (str): Heketi server url
+ volume_id (str): Volume ID
+
+ Kwargs:
+ The keys, values in kwargs are:
+ - json : (bool)
+ - secret : (str)|None
+ - user : (str)|None
+
+ Returns:
+        dict: volume info on success if the '--json' option is specified;
+            otherwise the raw string output is returned.
+        Tuple (ret, out, err): if raw_cli_output is True
+
+ Raises:
+ exceptions.ExecutionError: if command fails.
+
+ Example:
+        heketi_volume_info(heketi_client_node, heketi_server_url, volume_id)
+ """
+
+ heketi_server_url, json_arg, admin_key, user = _set_heketi_global_flags(
+ heketi_server_url, **kwargs)
+
+ cmd = "heketi-cli -s %s volume info %s %s %s %s" % (
+ heketi_server_url, volume_id, json_arg, admin_key, user)
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if raw_cli_output:
+ return ret, out, err
+ if ret != 0:
+ msg = (
+ "Failed to execute '%s' command on '%s' node with following "
+ "error: %s" % (cmd, heketi_client_node, err))
+ g.log.error(msg)
+ raise exceptions.ExecutionError(msg)
+
+ if json_arg:
+ return json.loads(out)
+ return out
+
+
+def heketi_volume_expand(heketi_client_node, heketi_server_url, volume_id,
+ expand_size, raw_cli_output=False, **kwargs):
+ """Executes heketi volume expand command.
+
+ Args:
+ heketi_client_node (str): Node on which cmd has to be executed.
+ heketi_server_url (str): Heketi server url
+ volume_id (str): Volume ID
+ expand_size (str): volume expand size
+
+ Kwargs:
+ The keys, values in kwargs are:
+ - json : (bool)
+ - secret : (str)|None
+ - user : (str)|None
+
+ Returns:
+        dict: volume expand info on success if the '--json' option is
+            specified; otherwise the raw string output is returned.
+ Tuple (ret, out, err): if raw_cli_output is True
+
+ Raises:
+ exceptions.ExecutionError: if command fails.
+
+ Example:
+ heketi_volume_expand(heketi_client_node, heketi_server_url, volume_id,
+ expand_size)
+ """
+
+ heketi_server_url, json_arg, admin_key, user = _set_heketi_global_flags(
+ heketi_server_url, **kwargs)
+
+ cmd = ("heketi-cli -s %s volume expand --volume=%s "
+ "--expand-size=%s %s %s %s" % (
+ heketi_server_url, volume_id, expand_size, json_arg,
+ admin_key, user))
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if raw_cli_output:
+ return ret, out, err
+
+ if ret != 0:
+ msg = (
+ "Failed to execute '%s' command on '%s' node with following "
+ "error: %s" % (cmd, heketi_client_node, err))
+ g.log.error(msg)
+ raise exceptions.ExecutionError(msg)
+ if json_arg:
+ return json.loads(out)
+ return out
+
+
+def heketi_volume_delete(heketi_client_node, heketi_server_url, volume_id,
+ raw_cli_output=False, raise_on_error=True, **kwargs):
+ """Executes heketi volume delete command.
+
+ Args:
+ heketi_client_node (str): Node on which cmd has to be executed.
+ heketi_server_url (str): Heketi server url
+ volume_id (str): Volume ID
+ raise_on_error (bool): whether or not to raise exception
+ in case of an error.
+
+ Kwargs:
+ The keys, values in kwargs are:
+ - json : (bool)
+ - secret : (str)|None
+ - user : (str)|None
+
+ Returns:
+ str: volume delete command output on success
+ Tuple (ret, out, err): if raw_cli_output is True
+
+ Raises:
+        exceptions.ExecutionError: when an error occurs, raw_cli_output is
+            False and raise_on_error is True.
+
+ Example:
+ heketi_volume_delete(heketi_client_node, heketi_server_url, volume_id)
+ """
+
+ heketi_server_url, json_arg, admin_key, user = _set_heketi_global_flags(
+ heketi_server_url, **kwargs)
+ err_msg = "Failed to delete '%s' volume. " % volume_id
+
+ cmd = "heketi-cli -s %s volume delete %s %s %s %s" % (
+ heketi_server_url, volume_id, json_arg, admin_key, user)
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if raw_cli_output:
+ return ret, out, err
+ if ret != 0:
+ err_msg += "Out: %s, \nErr: %s" % (out, err)
+ g.log.error(err_msg)
+ if raise_on_error:
+ raise exceptions.ExecutionError(err_msg)
+ return out
+
+
+def heketi_volume_list(heketi_client_node, heketi_server_url,
+ raw_cli_output=False, **kwargs):
+ """Executes heketi volume list command.
+
+ Args:
+ heketi_client_node (str): Node on which cmd has to be executed.
+ heketi_server_url (str): Heketi server url
+
+ Kwargs:
+ The keys, values in kwargs are:
+ - json : (bool)
+ - secret : (str)|None
+ - user : (str)|None
+
+ Returns:
+        dict: volume list on success if the '--json' option is specified;
+            otherwise the raw string output is returned.
+ Tuple (ret, out, err): if raw_cli_output is True
+
+ Raises:
+ exceptions.ExecutionError: if command fails.
+
+ Example:
+        heketi_volume_list(heketi_client_node, heketi_server_url)
+ """
+
+ heketi_server_url, json_arg, admin_key, user = _set_heketi_global_flags(
+ heketi_server_url, **kwargs)
+
+ cmd = "heketi-cli -s %s volume list %s %s %s" % (
+ heketi_server_url, json_arg, admin_key, user)
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if raw_cli_output:
+ return ret, out, err
+ if ret != 0:
+ msg = (
+ "Failed to execute '%s' command on '%s' node with following "
+ "error: %s" % (cmd, heketi_client_node, err))
+ g.log.error(msg)
+ raise exceptions.ExecutionError(msg)
+ if json_arg:
+ return json.loads(out)
+ return out
+
+
+def heketi_topology_info(heketi_client_node, heketi_server_url,
+ raw_cli_output=False, **kwargs):
+ """Executes heketi topology info command.
+
+ Args:
+ heketi_client_node (str): Node on which cmd has to be executed.
+ heketi_server_url (str): Heketi server url
+
+ Kwargs:
+ The keys, values in kwargs are:
+ - json : (bool)
+ - secret : (str)|None
+ - user : (str)|None
+
+ Returns:
+        dict: topology info if the '--json' option is specified; otherwise
+            the raw command output is returned on success.
+ Tuple (ret, out, err): if raw_cli_output is True
+
+ Raises:
+ exceptions.ExecutionError: if command fails.
+
+ Example:
+ heketi_topology_info(heketi_client_node, heketi_server_url)
+ """
+
+ heketi_server_url, json_arg, admin_key, user = _set_heketi_global_flags(
+ heketi_server_url, **kwargs)
+
+ cmd = "heketi-cli -s %s topology info %s %s %s" % (
+ heketi_server_url, json_arg, admin_key, user)
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if raw_cli_output:
+ return ret, out, err
+ if ret != 0:
+ msg = (
+ "Failed to execute '%s' command on '%s' node with following "
+ "error: %s" % (cmd, heketi_client_node, err))
+ g.log.error(msg)
+ raise exceptions.ExecutionError(msg)
+ if json_arg:
+ return json.loads(out)
+ return out
+
+
+def hello_heketi(heketi_client_node, heketi_server_url, **kwargs):
+ """Executes curl command to check if heketi server is alive.
+
+ Args:
+ heketi_client_node (str): Node on which cmd has to be executed.
+ heketi_server_url (str): Heketi server url
+
+ Kwargs:
+ The keys, values in kwargs are:
+ - secret : (str)|None
+ - user : (str)|None
+
+ Returns:
+ bool: True, if heketi server is alive
+
+ Raises:
+ exceptions.ExecutionError: if command fails.
+
+ Example:
+ hello_heketi(heketi_client_node, heketi_server_url)
+ """
+
+ heketi_server_url, json_arg, admin_key, user = _set_heketi_global_flags(
+ heketi_server_url, **kwargs)
+
+ cmd = "curl --max-time 10 %s/hello" % heketi_server_url
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if ret != 0:
+ msg = (
+ "Failed to execute '%s' command on '%s' node with following "
+ "error: %s" % (cmd, heketi_client_node, err))
+ g.log.error(msg)
+ raise exceptions.ExecutionError(msg)
+ return True
+
+
+def heketi_cluster_delete(heketi_client_node, heketi_server_url, cluster_id,
+ **kwargs):
+ """Executes heketi cluster delete command.
+
+ Args:
+ heketi_client_node (str): Node on which cmd has to be executed.
+ heketi_server_url (str): Heketi server url
+ cluster_id (str): Cluster ID
+
+ Kwargs:
+ The keys, values in kwargs are:
+ - json : (bool)
+ - secret : (str)|None
+ - user : (str)|None
+
+ Returns:
+ str: cluster delete command output on success
+
+ Raises:
+ exceptions.ExecutionError: if command fails.
+
+ Example:
+ heketi_cluster_delete(heketi_client_node, heketi_server_url,
+ cluster_id)
+ """
+
+ heketi_server_url, json_arg, admin_key, user = _set_heketi_global_flags(
+ heketi_server_url, **kwargs)
+
+ cmd = "heketi-cli -s %s cluster delete %s %s %s %s" % (
+ heketi_server_url, cluster_id, json_arg, admin_key, user)
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if ret != 0:
+ msg = (
+ "Failed to execute '%s' command on '%s' node with following "
+ "error: %s" % (cmd, heketi_client_node, err))
+ g.log.error(msg)
+ raise exceptions.ExecutionError(msg)
+ return out
+
+
+def heketi_cluster_info(heketi_client_node, heketi_server_url, cluster_id,
+ **kwargs):
+ """Executes heketi cluster info command.
+
+ Args:
+ heketi_client_node (str): Node on which cmd has to be executed.
+ heketi_server_url (str): Heketi server url
+        cluster_id (str): Cluster ID
+
+ Kwargs:
+ The keys, values in kwargs are:
+ - json : (bool)
+ - secret : (str)|None
+ - user : (str)|None
+
+ Returns:
+        dict: cluster info on success if the '--json' option is specified;
+            otherwise the raw string output is returned.
+
+ Raises:
+ exceptions.ExecutionError: if command fails.
+
+ Example:
+        heketi_cluster_info(heketi_client_node, heketi_server_url, cluster_id)
+ """
+
+ heketi_server_url, json_arg, admin_key, user = _set_heketi_global_flags(
+ heketi_server_url, **kwargs)
+
+ cmd = "heketi-cli -s %s cluster info %s %s %s %s" % (
+ heketi_server_url, cluster_id, json_arg, admin_key, user)
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if ret != 0:
+ msg = (
+ "Failed to execute '%s' command on '%s' node with following "
+ "error: %s" % (cmd, heketi_client_node, err))
+ g.log.error(msg)
+ raise exceptions.ExecutionError(msg)
+ if json_arg:
+ return json.loads(out)
+ return out
+
+
+def heketi_cluster_list(heketi_client_node, heketi_server_url, **kwargs):
+ """Executes heketi cluster list command.
+
+ Args:
+ heketi_client_node (str): Node on which cmd has to be executed.
+ heketi_server_url (str): Heketi server url
+
+ Kwargs:
+ The keys, values in kwargs are:
+ - json : (bool)
+ - secret : (str)|None
+ - user : (str)|None
+
+ Returns:
+        dict: cluster list on success if the '--json' option is specified;
+            otherwise the raw string output is returned.
+
+ Raises:
+ exceptions.ExecutionError: if command fails.
+
+ Example:
+        heketi_cluster_list(heketi_client_node, heketi_server_url)
+ """
+
+ heketi_server_url, json_arg, admin_key, user = _set_heketi_global_flags(
+ heketi_server_url, **kwargs)
+
+ cmd = "heketi-cli -s %s cluster list %s %s %s" % (
+ heketi_server_url, json_arg, admin_key, user)
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if ret != 0:
+ msg = (
+ "Failed to execute '%s' command on '%s' node with following "
+ "error: %s" % (cmd, heketi_client_node, err))
+ g.log.error(msg)
+ raise exceptions.ExecutionError(msg)
+ if json_arg:
+ return json.loads(out)
+ return out
+
+
+def heketi_device_add(heketi_client_node, heketi_server_url, device_name,
+ node_id, raw_cli_output=False, **kwargs):
+ """Executes heketi device add command.
+
+ Args:
+ heketi_client_node (str): Node on which cmd has to be executed.
+ heketi_server_url (str): Heketi server url
+        device_name (str): Device name to add
+ node_id (str): Node id to add the device
+
+ Kwargs:
+ The keys, values in kwargs are:
+ - json : (bool)
+ - secret : (str)|None
+ - user : (str)|None
+
+ Returns:
+ str: heketi device add command output on success.
+ Tuple (ret, out, err): if raw_cli_output is True
+
+ Raises:
+ exceptions.ExecutionError: if command fails.
+
+ Example:
+ heketi_device_add(heketi_client_node, heketi_server_url, device_name,
+ node_id)
+ """
+
+ heketi_server_url, json_arg, admin_key, user = _set_heketi_global_flags(
+ heketi_server_url, **kwargs)
+
+ cmd = "heketi-cli -s %s device add --name=%s --node=%s %s %s %s" % (
+ heketi_server_url, device_name, node_id, json_arg, admin_key, user)
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if raw_cli_output:
+ return ret, out, err
+ if ret != 0:
+ msg = (
+ "Failed to execute '%s' command on '%s' node with following "
+ "error: %s" % (cmd, heketi_client_node, err))
+ g.log.error(msg)
+ raise exceptions.ExecutionError(msg)
+ return out
+
+
+def heketi_device_delete(heketi_client_node, heketi_server_url, device_id,
+ raw_cli_output=False, **kwargs):
+ """Executes heketi device delete command.
+
+ Args:
+ heketi_client_node (str): Node on which cmd has to be executed.
+ heketi_server_url (str): Heketi server url
+        device_id (str): Device id to delete
+
+ Kwargs:
+ The keys, values in kwargs are:
+ - json : (bool)
+ - secret : (str)|None
+ - user : (str)|None
+
+ Returns:
+ str: heketi device delete command output on success.
+ Tuple (ret, out, err): if raw_cli_output is True
+
+ Raises:
+ exceptions.ExecutionError: if command fails.
+
+ Example:
+ heketi_device_delete(heketi_client_node, heketi_server_url, device_id)
+ """
+
+ heketi_server_url, json_arg, admin_key, user = _set_heketi_global_flags(
+ heketi_server_url, **kwargs)
+
+ cmd = "heketi-cli -s %s device delete %s %s %s %s" % (
+ heketi_server_url, device_id, json_arg, admin_key, user)
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if raw_cli_output:
+ return ret, out, err
+ if ret != 0:
+ msg = (
+ "Failed to execute '%s' command on '%s' node with following "
+ "error: %s" % (cmd, heketi_client_node, err))
+ g.log.error(msg)
+ raise exceptions.ExecutionError(msg)
+ return out
+
+
+def heketi_device_disable(heketi_client_node, heketi_server_url, device_id,
+ raw_cli_output=False, **kwargs):
+ """Executes heketi device disable command.
+
+ Args:
+ heketi_client_node (str): Node on which cmd has to be executed.
+ heketi_server_url (str): Heketi server url
+        device_id (str): Device id to disable
+
+ Kwargs:
+ The keys, values in kwargs are:
+ - json : (bool)
+ - secret : (str)|None
+ - user : (str)|None
+
+ Returns:
+ str: heketi device disable command output on success.
+ Tuple (ret, out, err): if raw_cli_output is True
+
+ Raises:
+ exceptions.ExecutionError: if command fails.
+
+ Example:
+ heketi_device_disable(heketi_client_node, heketi_server_url, device_id)
+ """
+
+ heketi_server_url, json_arg, admin_key, user = _set_heketi_global_flags(
+ heketi_server_url, **kwargs)
+ cmd = "heketi-cli -s %s device disable %s %s %s %s" % (
+ heketi_server_url, device_id, json_arg, admin_key, user)
+
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if raw_cli_output:
+ return ret, out, err
+ if ret != 0:
+ msg = (
+ "Failed to execute '%s' command on '%s' node with following "
+ "error: %s" % (cmd, heketi_client_node, err))
+ g.log.error(msg)
+ raise exceptions.ExecutionError(msg)
+ return out
+
+
+def heketi_device_enable(heketi_client_node, heketi_server_url, device_id,
+ raw_cli_output=False, **kwargs):
+ """Executes heketi device enable command.
+
+ Args:
+ heketi_client_node (str): Node on which cmd has to be executed.
+ heketi_server_url (str): Heketi server url
+        device_id (str): Device id to enable
+
+ Kwargs:
+ The keys, values in kwargs are:
+ - json : (bool)
+ - secret : (str)|None
+ - user : (str)|None
+
+ Returns:
+ str: heketi device enable command output on success.
+ Tuple (ret, out, err): if raw_cli_output is True
+
+ Raises:
+ exceptions.ExecutionError: if command fails.
+
+ Example:
+ heketi_device_enable(heketi_client_node, heketi_server_url, device_id)
+ """
+
+ heketi_server_url, json_arg, admin_key, user = _set_heketi_global_flags(
+ heketi_server_url, **kwargs)
+ cmd = "heketi-cli -s %s device enable %s %s %s %s" % (
+ heketi_server_url, device_id, json_arg, admin_key, user)
+
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if raw_cli_output:
+ return ret, out, err
+ if ret != 0:
+ msg = (
+ "Failed to execute '%s' command on '%s' node with following "
+ "error: %s" % (cmd, heketi_client_node, err))
+ g.log.error(msg)
+ raise exceptions.ExecutionError(msg)
+ return out
+
+
+def heketi_device_info(heketi_client_node, heketi_server_url, device_id,
+ raw_cli_output=False, **kwargs):
+ """Executes heketi device info command.
+
+ Args:
+ heketi_client_node (str): Node on which cmd has to be executed.
+ heketi_server_url (str): Heketi server url
+ device_id (str): Device ID
+
+ Kwargs:
+ The keys, values in kwargs are:
+ - json : (bool)
+ - secret : (str)|None
+ - user : (str)|None
+
+ Returns:
+        str: device info as raw CLI output if "json" arg is not provided.
+        dict: device info parsed to dict if "json" arg is provided.
+ Tuple (ret, out, err): if raw_cli_output is True
+
+ Raises:
+ exceptions.ExecutionError: if command fails.
+
+ Example:
+ heketi_device_info(heketi_client_node, heketi_server_url, device_id)
+ """
+
+ heketi_server_url, json_arg, admin_key, user = _set_heketi_global_flags(
+ heketi_server_url, **kwargs)
+
+ cmd = "heketi-cli -s %s device info %s %s %s %s" % (
+ heketi_server_url, device_id, json_arg, admin_key, user)
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if raw_cli_output:
+ return ret, out, err
+ if ret != 0:
+ msg = (
+ "Failed to execute '%s' command on '%s' node with following "
+ "error: %s" % (cmd, heketi_client_node, err))
+ g.log.error(msg)
+ raise exceptions.ExecutionError(msg)
+
+    if json_arg:
+        return json.loads(out)
+    return out
+
+
+def heketi_device_remove(heketi_client_node, heketi_server_url, device_id,
+ raw_cli_output=False, **kwargs):
+ """Executes heketi device remove command.
+
+ Args:
+ heketi_client_node (str): Node on which cmd has to be executed.
+ heketi_server_url (str): Heketi server url
+ device_id (str): Device id to remove device
+
+ Kwargs:
+ The keys, values in kwargs are:
+ - json : (bool)
+ - secret : (str)|None
+ - user : (str)|None
+
+ Returns:
+ str: heketi device remove command output on success.
+ Tuple (ret, out, err): if raw_cli_output is True
+
+ Raises:
+ exceptions.ExecutionError: if command fails.
+
+ Example:
+ heketi_device_remove(heketi_client_node, heketi_server_url, device_id)
+ """
+
+ heketi_server_url, json_arg, admin_key, user = _set_heketi_global_flags(
+ heketi_server_url, **kwargs)
+
+ cmd = "heketi-cli -s %s device remove %s %s %s %s" % (
+ heketi_server_url, device_id, json_arg, admin_key, user)
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if raw_cli_output:
+ return ret, out, err
+ if ret != 0:
+ msg = (
+ "Failed to execute '%s' command on '%s' node with following "
+ "error: %s" % (cmd, heketi_client_node, err))
+ g.log.error(msg)
+ raise exceptions.ExecutionError(msg)
+
+ return out
+
+
+def heketi_node_delete(heketi_client_node, heketi_server_url, node_id,
+ **kwargs):
+ """Executes heketi node delete command.
+
+ Args:
+ heketi_client_node (str): Node on which cmd has to be executed.
+ heketi_server_url (str): Heketi server url.
+ node_id (str): Node id to delete
+
+ Kwargs:
+ The keys, values in kwargs are:
+ - json : (bool)
+ - secret : (str)|None
+ - user : (str)|None
+
+ Returns:
+ str: heketi node delete command output on success.
+
+ Raises:
+ exceptions.ExecutionError: if command fails.
+
+ Example:
+ heketi_node_delete(heketi_client_node, heketi_server_url, node_id)
+ """
+
+ heketi_server_url, json_arg, admin_key, user = _set_heketi_global_flags(
+ heketi_server_url, **kwargs)
+
+ cmd = "heketi-cli -s %s node delete %s %s %s %s" % (
+ heketi_server_url, node_id, json_arg, admin_key, user)
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if ret != 0:
+ msg = (
+ "Failed to execute '%s' command on '%s' node with following "
+ "error: %s" % (cmd, heketi_client_node, err))
+ g.log.error(msg)
+ raise exceptions.ExecutionError(msg)
+ return out
+
+
+def heketi_node_disable(heketi_client_node, heketi_server_url, node_id,
+ **kwargs):
+ """Executes heketi node disable command.
+
+ Args:
+ heketi_client_node (str): Node on which cmd has to be executed.
+ heketi_server_url (str): Heketi server url
+        node_id (str): Node id to disable
+
+ Kwargs:
+ The keys, values in kwargs are:
+ - json : (bool)
+ - secret : (str)|None
+ - user : (str)|None
+
+ Returns:
+ str: heketi node disable command output on success.
+
+ Raises:
+ exceptions.ExecutionError: if command fails.
+
+ Example:
+ heketi_node_disable(heketi_client_node, heketi_server_url, node_id)
+ """
+
+ heketi_server_url, json_arg, admin_key, user = _set_heketi_global_flags(
+ heketi_server_url, **kwargs)
+
+ cmd = "heketi-cli -s %s node disable %s %s %s %s" % (
+ heketi_server_url, node_id, json_arg, admin_key, user)
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if ret != 0:
+ msg = (
+ "Failed to execute '%s' command on '%s' node with following "
+ "error: %s" % (cmd, heketi_client_node, err))
+ g.log.error(msg)
+ raise exceptions.ExecutionError(msg)
+ return out
+
+
+def heketi_node_enable(heketi_client_node, heketi_server_url, node_id,
+ **kwargs):
+ """Executes heketi node enable command.
+
+ Args:
+ heketi_client_node (str): Node on which cmd has to be executed.
+ heketi_server_url (str): Heketi server url
+        node_id (str): Node id to enable
+
+ Kwargs:
+ The keys, values in kwargs are:
+ - json : (bool)
+ - secret : (str)|None
+ - user : (str)|None
+
+ Returns:
+ str: heketi node enable command output on success.
+
+ Raises:
+ exceptions.ExecutionError: if command fails.
+
+ Example:
+ heketi_node_enable(heketi_client_node, heketi_server_url, node_id)
+ """
+
+ heketi_server_url, json_arg, admin_key, user = _set_heketi_global_flags(
+ heketi_server_url, **kwargs)
+
+ cmd = "heketi-cli -s %s node enable %s %s %s %s" % (
+ heketi_server_url, node_id, json_arg, admin_key, user)
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if ret != 0:
+ msg = (
+ "Failed to execute '%s' command on '%s' node with following "
+ "error: %s" % (cmd, heketi_client_node, err))
+ g.log.error(msg)
+ raise exceptions.ExecutionError(msg)
+ return out
+
+
+def heketi_node_info(heketi_client_node, heketi_server_url, node_id, **kwargs):
+ """Executes heketi node info command.
+
+ Args:
+ heketi_client_node (str): Node on which cmd has to be executed.
+ heketi_server_url (str): Heketi server url
+ node_id (str): Node ID
+
+ Kwargs:
+ The keys, values in kwargs are:
+ - json : (bool)
+ - secret : (str)|None
+ - user : (str)|None
+
+ Returns:
+ dict: node info on success,
+ str: raw output if 'json' arg is not provided.
+
+ Raises:
+ exceptions.ExecutionError: if command fails.
+
+ Example:
+ heketi_node_info(heketi_client_node, heketi_server_url, node_id)
+ """
+
+ heketi_server_url, json_arg, admin_key, user = _set_heketi_global_flags(
+ heketi_server_url, **kwargs)
+
+ cmd = "heketi-cli -s %s node info %s %s %s %s" % (
+ heketi_server_url, node_id, json_arg, admin_key, user)
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if ret != 0:
+ msg = (
+ "Failed to execute '%s' command on '%s' node with following "
+ "error: %s" % (cmd, heketi_client_node, err))
+ g.log.error(msg)
+ raise exceptions.ExecutionError(msg)
+ if json_arg:
+ return json.loads(out)
+ return out
+
+
+def heketi_node_list(heketi_client_node, heketi_server_url,
+ heketi_user=None, heketi_secret=None):
+ """Execute CLI 'heketi node list' command and parse its output.
+
+ Args:
+ heketi_client_node (str): Node on which cmd has to be executed
+ heketi_server_url (str): Heketi server url to perform request to
+ heketi_user (str): Name of the user to perform request with
+ heketi_secret (str): Secret for 'heketi_user'
+ Returns:
+ list of strings which are node IDs
+ Raises: openshiftstoragelibs.exceptions.ExecutionError when command fails.
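+
+    Example:
+        # Illustrative; the node name and URL below are hypothetical:
+        node_ids = heketi_node_list(
+            "heketi-client-node", "http://heketi-url:8080")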
+ """
+
+ heketi_server_url, json_arg, admin_key, user = _set_heketi_global_flags(
+ heketi_server_url, user=heketi_user, secret=heketi_secret)
+
+ cmd = "heketi-cli -s %s node list %s %s %s" % (
+ heketi_server_url, json_arg, admin_key, user)
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if ret != 0:
+ msg = (
+ "Failed to execute '%s' command on '%s' node with following "
+ "error: %s" % (cmd, heketi_client_node, err))
+ g.log.error(msg)
+ raise exceptions.ExecutionError(msg)
+
+ heketi_node_id_list = []
+ for line in out.strip().split("\n"):
+ # Line looks like this: 'Id:nodeIdString\tCluster:clusterIdString'
+ heketi_node_id_list.append(
+ line.strip().split("Cluster")[0].strip().split(":")[1])
+ return heketi_node_id_list
+
+
+def heketi_blockvolume_info(heketi_client_node, heketi_server_url,
+ block_volume_id, **kwargs):
+ """Executes heketi blockvolume info command.
+
+ Args:
+ heketi_client_node (str): Node on which cmd has to be executed.
+ heketi_server_url (str): Heketi server url
+ block_volume_id (str): block volume ID
+
+ Kwargs:
+ The keys, values in kwargs are:
+ - json : (bool)
+ - secret : (str)|None
+ - user : (str)|None
+
+ Returns:
+ dict: block volume info on success.
+ str: raw output if 'json' arg is not provided.
+
+ Raises:
+ exceptions.ExecutionError: if command fails.
+
+ Example:
+        heketi_blockvolume_info(heketi_client_node, heketi_server_url,
+                                block_volume_id)
+ """
+
+ heketi_server_url, json_arg, admin_key, user = _set_heketi_global_flags(
+ heketi_server_url, **kwargs)
+
+ cmd = "heketi-cli -s %s blockvolume info %s %s %s %s" % (
+ heketi_server_url, block_volume_id, json_arg, admin_key, user)
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if ret != 0:
+ msg = (
+ "Failed to execute '%s' command on '%s' node with following "
+ "error: %s" % (cmd, heketi_client_node, err))
+ g.log.error(msg)
+ raise exceptions.ExecutionError(msg)
+ if json_arg:
+ return json.loads(out)
+ return out
+
+
+def heketi_blockvolume_create(heketi_client_node, heketi_server_url, size,
+ **kwargs):
+ """Executes heketi blockvolume create
+
+ Args:
+ heketi_client_node (str): Node on which cmd has to be executed.
+ heketi_server_url (str): Heketi server url
+ size (int): blockvolume size
+
+ Kwargs:
+ The keys, values in kwargs are:
+ - name : (str)|None
+ - cluster : (str)|None
+ - ha : (int)|None
+ - auth : (bool)
+ - json : (bool)
+ - secret : (str)|None
+ - user : (str)|None
+
+ Returns:
+        dict: blockvolume create info on success if the '--json' option is
+            specified; otherwise the raw string output is returned.
+
+ Raises:
+ exceptions.ExecutionError: if command fails.
+
+ Example:
+ heketi_blockvolume_create(heketi_client_node, heketi_server_url, size)
+ """
+
+ heketi_server_url, json_arg, admin_key, user = _set_heketi_global_flags(
+ heketi_server_url, **kwargs)
+
+ auth = clusters = ha = name = None
+ if heketi_server_url is None:
+ heketi_server_url = ("http://" +
+ "heketi-storage-project.cloudapps.mystorage.com")
+
+ if 'auth' in kwargs:
+ auth = kwargs['auth']
+ if 'clusters' in kwargs:
+ clusters = kwargs['clusters']
+ if 'ha' in kwargs:
+ ha = int(kwargs['ha'])
+ if 'name' in kwargs:
+ name = kwargs['name']
+
+ auth_arg = clusters_arg = ha_arg = name_arg = ''
+
+ if auth:
+ auth_arg = "--auth"
+ if clusters is not None:
+ clusters_arg = "--clusters %s" % clusters
+ if ha is not None:
+ ha_arg = "--ha %d" % ha
+ if name is not None:
+ name_arg = "--name %s" % name
+
+ cmd = ("heketi-cli -s %s blockvolume create --size=%s %s %s %s %s "
+ "%s %s %s %s" % (heketi_server_url, str(size), auth_arg,
+ clusters_arg, ha_arg, name_arg, name_arg,
+ admin_key, user, json_arg))
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if ret != 0:
+ msg = (
+ "Failed to execute '%s' command on '%s' node with following "
+ "error: %s" % (cmd, heketi_client_node, err))
+ g.log.error(msg)
+ raise exceptions.ExecutionError(msg)
+ if json_arg:
+ return json.loads(out)
+ return out
+
+
+def heketi_blockvolume_delete(heketi_client_node, heketi_server_url,
+ block_volume_id, raise_on_error=True, **kwargs):
+ """Executes heketi blockvolume delete command.
+
+ Args:
+ heketi_client_node (str): Node on which cmd has to be executed.
+ heketi_server_url (str): Heketi server url
+ block_volume_id (str): block volume ID
+ raise_on_error (bool): whether or not to raise exception
+ in case of an error.
+
+ Kwargs:
+ The keys, values in kwargs are:
+ - json : (bool)
+ - secret : (str)|None
+ - user : (str)|None
+
+ Returns:
+ str: volume delete command output on success
+
+ Raises:
+ exceptions.ExecutionError: if command fails.
+
+ Example:
+ heketi_blockvolume_delete(heketi_client_node, heketi_server_url,
+ block_volume_id)
+ """
+
+ heketi_server_url, json_arg, admin_key, user = _set_heketi_global_flags(
+ heketi_server_url, **kwargs)
+ err_msg = "Failed to delete '%s' volume. " % block_volume_id
+
+ cmd = "heketi-cli -s %s blockvolume delete %s %s %s %s" % (
+ heketi_server_url, block_volume_id, json_arg, admin_key, user)
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if ret != 0:
+ err_msg += "Out: %s, \nErr: %s" % (out, err)
+ g.log.error(err_msg)
+ if raise_on_error:
+ raise exceptions.ExecutionError(err_msg)
+ return out
+
+
+def heketi_blockvolume_list(heketi_client_node, heketi_server_url, **kwargs):
+ """Executes heketi blockvolume list command.
+
+ Args:
+ heketi_client_node (str): Node on which cmd has to be executed.
+ heketi_server_url (str): Heketi server url
+
+ Kwargs:
+ The keys, values in kwargs are:
+ - json : (bool)
+ - secret : (str)|None
+ - user : (str)|None
+
+ Returns:
+        dict: volume list on success when the 'json' kwarg is provided.
+        str: raw output when the 'json' kwarg is not provided.
+
+ Raises:
+ exceptions.ExecutionError: if command fails.
+
+ Example:
+        heketi_blockvolume_list(heketi_client_node, heketi_server_url)
+ """
+
+ heketi_server_url, json_arg, admin_key, user = _set_heketi_global_flags(
+ heketi_server_url, **kwargs)
+
+ cmd = "heketi-cli -s %s blockvolume list %s %s %s" % (
+ heketi_server_url, json_arg, admin_key, user)
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if ret != 0:
+ msg = (
+ "Failed to execute '%s' command on '%s' node with following "
+ "error: %s" % (cmd, heketi_client_node, err))
+ g.log.error(msg)
+ raise exceptions.ExecutionError(msg)
+ if json_arg:
+ return json.loads(out)
+ return out
+
+
+def verify_volume_name_prefix(hostname, prefix, namespace, pvc_name,
+ heketi_server_url, **kwargs):
+ """Checks whether heketi voluem is present with volname prefix or not.
+
+ Args:
+ hostname (str): hostname on which we want
+ to check the heketi vol
+ prefix (str): volnameprefix given in storageclass
+ namespace (str): namespace
+ pvc_name (str): name of the pvc
+ heketi_server_url (str): Heketi server url
+
+ Kwargs:
+ The keys, values in kwargs are:
+ - json : (bool)
+ - secret : (str)|None
+ - user : (str)|None
+
+ Returns:
+ bool: True if volume found.
+
+ Raises:
+ exceptions.ExecutionError: if command fails.
+ """
+ heketi_server_url, json_arg, admin_key, user = _set_heketi_global_flags(
+ heketi_server_url, **kwargs)
+
+ heketi_vol_name_prefix = "%s_%s_%s_" % (prefix, namespace, pvc_name)
+ cmd = "heketi-cli -s %s volume list %s %s %s | grep %s" % (
+ heketi_server_url, json_arg, admin_key, user, heketi_vol_name_prefix)
+ ret, out, err = g.run(hostname, cmd, "root")
+
+ if ret != 0:
+ msg = (
+ "Failed to execute '%s' command on '%s' node with following "
+ "error: %s" % (cmd, hostname, err))
+ g.log.error(msg)
+ raise exceptions.ExecutionError(msg)
+ output = out.strip()
+ g.log.info("heketi volume with volnameprefix present %s" % output)
+ return True
+
+
+def set_tags(heketi_client_node, heketi_server_url, source, source_id, tag,
+ **kwargs):
+ """Set any tags on Heketi node or device.
+
+ Args:
+ - heketi_client_node (str) : Node where we want to run our commands.
+ eg. "10.70.47.64"
+        - heketi_server_url (str) : Heketi server url
+            eg. "http://172.30.147.142:8080"
+        - source (str) : Whether to set the tag on a node or a device.
+            Allowed values are "node" and "device".
+        - source_id (str) : ID of the node or device.
+            eg. "4f9c0249834919dd372e8fb3344cd7bd"
+        - tag (str) : The tag to set
+ eg. "arbiter:required"
+ Kwargs:
+ user (str) : username
+ secret (str) : secret for that user
+ Returns:
+ True : if successful
+ Raises:
+ ValueError : when improper input data are provided.
+ exceptions.ExecutionError : when command fails.
+ """
+
+ if source not in ('node', 'device'):
+ msg = ("Incorrect value we can use 'node' or 'device' instead of %s."
+ % source)
+ g.log.error(msg)
+ raise ValueError(msg)
+
+ heketi_server_url, json_args, secret, user = _set_heketi_global_flags(
+ heketi_server_url, **kwargs)
+
+ cmd = ("heketi-cli -s %s %s settags %s %s %s %s" %
+ (heketi_server_url, source, source_id, tag, user, secret))
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if not ret:
+ g.log.info("Tagging of %s to %s is successful" % (source, tag))
+ return True
+
+ g.log.error(err)
+ raise exceptions.ExecutionError(err)
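+
+# Minimal usage sketch (node, URL and ID below are hypothetical), setting
+# an arbiter tag on a device:
+#
+#     set_tags("ocp-master.example.com", "http://heketi.example.com:8080",
+#              "device", "4f9c0249834919dd372e8fb3344cd7bd",
+#              "arbiter:required", user="admin", secret="admin-secret")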
+
+
+def set_arbiter_tag(heketi_client_node, heketi_server_url, source,
+ source_id, arbiter_tag_value, **kwargs):
+ """Set Arbiter tags on Heketi node or device.
+
+ Args:
+ - heketi_client_node (str) : node where we want to run our commands.
+ eg. "10.70.47.64"
+        - heketi_server_url (str) : Heketi server url
+            eg. "http://172.30.147.142:8080"
+        - source (str) : Whether to set the tag on a node or a device.
+            Allowed values are "node" and "device".
+ - source_id (str) : ID of Heketi node or device
+ eg. "4f9c0249834919dd372e8fb3344cd7bd"
+        - arbiter_tag_value (str) : The arbiter tag value to set.
+ Allowed values are "required", "disabled" and "supported".
+ Kwargs:
+ user (str) : username
+ secret (str) : secret for that user
+ Returns:
+ True : if successful
+ Raises:
+ ValueError : when improper input data are provided.
+ exceptions.ExecutionError : when command fails.
+ """
+
+ version = heketi_version.get_heketi_version(heketi_client_node)
+ if version < '6.0.0-11':
+ msg = ("heketi-client package %s does not support arbiter "
+ "functionality" % version.v_str)
+ g.log.error(msg)
+ raise NotImplementedError(msg)
+
+ if arbiter_tag_value in ('required', 'disabled', 'supported'):
+ arbiter_tag_value = "arbiter:%s" % arbiter_tag_value
+ return set_tags(heketi_client_node, heketi_server_url,
+ source, source_id, arbiter_tag_value, **kwargs)
+
+ msg = ("Incorrect value we can use 'required', 'disabled', 'supported'"
+ "instead of %s" % arbiter_tag_value)
+ g.log.error(msg)
+ raise ValueError(msg)
+
+
+def rm_tags(heketi_client_node, heketi_server_url, source, source_id, tag,
+ **kwargs):
+ """Remove any kind of tags from Heketi node or device.
+
+ Args:
+ - heketi_client_node (str) : Node where we want to run our commands.
+ eg. "10.70.47.64"
+        - heketi_server_url (str) : Heketi server url
+            eg. "http://172.30.147.142:8080"
+        - source (str) : Whether to remove the tag from a node or a device.
+            Allowed values are "node" and "device".
+        - source_id (str) : ID of the node or device.
+            eg. "4f9c0249834919dd372e8fb3344cd7bd"
+        - tag (str) : The tag to remove.
+ Kwargs:
+ user (str) : username
+ secret (str) : secret for that user
+ Returns:
+ True : if successful
+ Raises:
+ ValueError : when improper input data are provided.
+ exceptions.ExecutionError : when command fails.
+ """
+
+ heketi_server_url, json_args, secret, user = _set_heketi_global_flags(
+ heketi_server_url, **kwargs)
+ if source not in ('node', 'device'):
+ msg = ("Incorrect value we can use 'node' or 'device' instead of %s."
+ % source)
+ g.log.error(msg)
+ raise ValueError(msg)
+
+ cmd = ("heketi-cli -s %s %s rmtags %s %s %s %s" %
+ (heketi_server_url, source, source_id, tag, user, secret))
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if not ret:
+ g.log.info("Removal of %s tag from %s is successful." % (tag, source))
+ return True
+
+ g.log.error(err)
+ raise exceptions.ExecutionError(err)
+
+
+def rm_arbiter_tag(heketi_client_node, heketi_server_url, source, source_id,
+ **kwargs):
+ """Remove Arbiter tag from Heketi node or device.
+
+ Args:
+ - heketi_client_node (str) : Node where we want to run our commands.
+ eg. "10.70.47.64"
+        - heketi_server_url (str) : Heketi server url
+            eg. "http://172.30.147.142:8080"
+        - source (str) : Whether to remove the tag from a node or a device.
+            Allowed values are "node" and "device".
+ - source_id (str) : ID of Heketi node or device.
+ eg. "4f9c0249834919dd372e8fb3344cd7bd"
+ Kwargs:
+ user (str) : username
+ secret (str) : secret for that user
+ Returns:
+ True : if successful
+ Raises:
+ ValueError : when improper input data are provided.
+ exceptions.ExecutionError : when command fails.
+ """
+
+ version = heketi_version.get_heketi_version(heketi_client_node)
+ if version < '6.0.0-11':
+ msg = ("heketi-client package %s does not support arbiter "
+ "functionality" % version.v_str)
+ g.log.error(msg)
+ raise NotImplementedError(msg)
+
+ return rm_tags(heketi_client_node, heketi_server_url,
+ source, source_id, 'arbiter', **kwargs)
+
+
+def get_heketi_metrics(heketi_client_node, heketi_server_url,
+ prometheus_format=False):
+ """Execute curl command to get metrics output.
+
+ Args:
+ - heketi_client_node (str) : Node where we want to run our commands.
+ - heketi_server_url (str) : This is a heketi server url.
+        - prometheus_format (bool) : controls the format of the output.
+            By default it is False, so the Prometheus-format output gets
+            parsed into a python dict. Set it to True to get raw output.
+
+ Raises:
+ exceptions.ExecutionError: if command fails.
+
+ Returns:
+        dict: parsed metrics; str: raw output if 'prometheus_format' is True.
+ """
+
+ version = heketi_version.get_heketi_version(heketi_client_node)
+ if version < '6.0.0-14':
+ msg = ("heketi-client package %s does not support heketi "
+ "metrics functionality" % version.v_str)
+ g.log.error(msg)
+ raise NotImplementedError(msg)
+
+ cmd = "curl --max-time 10 %s/metrics" % heketi_server_url
+ ret, out, err = g.run(heketi_client_node, cmd)
+ if ret != 0:
+ msg = "failed to get Heketi metrics with following error: %s" % err
+ g.log.error(msg)
+ raise exceptions.ExecutionError(msg)
+ if prometheus_format:
+ return out.strip()
+ return parse_prometheus_data(out)
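+
+# Usage sketch (hosts below are hypothetical); the default call returns a
+# python dict parsed from the Prometheus text output, while passing
+# prometheus_format=True returns the raw text instead:
+#
+#     metrics = get_heketi_metrics(
+#         "ocp-master.example.com", "http://heketi.example.com:8080")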
+
+
+def heketi_examine_gluster(heketi_client_node, heketi_server_url):
+ """Execute heketi command to examine output from gluster servers.
+
+ Args:
+ - heketi_client_node (str): Node where we want to run our commands.
+ - heketi_server_url (str): This is a heketi server url.
+
+ Raises:
+        NotImplementedError: if the heketi version does not support it.
+ exceptions.ExecutionError: if command fails.
+
+ Returns:
+ dictionary: if successful
+ """
+
+ version = heketi_version.get_heketi_version(heketi_client_node)
+ if version < '8.0.0-7':
+ msg = ("heketi-client package %s does not support server state examine"
+ " gluster" % version.v_str)
+ g.log.error(msg)
+ raise NotImplementedError(msg)
+
+ heketi_server_url, json_arg, secret, user = _set_heketi_global_flags(
+ heketi_server_url)
+ # output is always json-like and we do not need to provide "--json" CLI arg
+ cmd = ("heketi-cli server state examine gluster -s %s %s %s"
+ % (heketi_server_url, user, secret))
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if ret != 0:
+ msg = "failed to examine gluster with following error: %s" % err
+ g.log.error(msg)
+ raise exceptions.ExecutionError(msg)
+
+ return json.loads(out)
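+
+# Usage sketch (hosts below are hypothetical); the returned dict mirrors
+# the JSON produced by 'heketi-cli server state examine gluster':
+#
+#     report = heketi_examine_gluster(
+#         "ocp-master.example.com", "http://heketi.example.com:8080")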
diff --git a/openshift-storage-libs/openshiftstoragelibs/heketi_version.py b/openshift-storage-libs/openshiftstoragelibs/heketi_version.py
new file mode 100644
index 00000000..0da81176
--- /dev/null
+++ b/openshift-storage-libs/openshiftstoragelibs/heketi_version.py
@@ -0,0 +1,246 @@
+"""
+Use this module for any Heketi server and client packages versions comparisons.
+
+Usage example:
+
+ # Assume Heketi server version is '7.0.0-3' and client is '7.0.0-5'
+    Then we have the following:
+
+ from openshiftstoragelibs import heketi_version
+ version = heketi_version.get_heketi_version()
+ if version < '7.0.0-4':
+ # True
+ if version < '7.0.0-2':
+ # False
+ if '7.0.0-2' < version <= '7.0.0-3':
+ # True
+
+    As a first step, we compare the requested version against the Heketi
+    server version, making sure they are compatible. Then we make sure that
+    the existing heketi client package has either the same or a newer
+    version than the server's one.
+"""
+import re
+
+from glusto.core import Glusto as g
+import six
+
+from openshiftstoragelibs import command
+from openshiftstoragelibs import exceptions
+
+
+HEKETI_VERSION_RE = r"(\d+)(?:\.)(\d+)(?:\.)(\d+)(?:\-)(\d+)$"
+HEKETI_CLIENT_VERSION = None
+HEKETI_SERVER_VERSION = None
+
+
+def _get_heketi_client_version_str(hostname=None):
+ """Gets Heketi client package version from heketi client node.
+
+ Args:
+ hostname (str): Node on which the version check command should run.
+ Returns:
+ str : heketi version, i.e. '7.0.0-1'
+ Raises: 'exceptions.ExecutionError' if failed to get version
+ """
+ if not hostname:
+ openshift_config = g.config.get("cns", g.config.get("openshift"))
+ heketi_config = openshift_config['heketi_config']
+ hostname = heketi_config['heketi_client_node'].strip()
+ cmd = ("rpm -q heketi-client --queryformat '%{version}-%{release}\n' | "
+ "cut -d '.' -f 1,2,3")
+ ret, out, err = g.run(hostname, cmd, "root")
+ if ret != 0:
+ msg = ("Failed to get heketi client version. "
+ "\n'err': %s\n 'out': %s" % (err, out))
+ g.log.error(msg)
+        raise exceptions.ExecutionError(msg)
+ out = out.strip()
+ if not out:
+ error_msg = "Empty output for '%s' cmd: '%s'" % (cmd, out)
+ g.log.error(error_msg)
+ raise exceptions.ExecutionError(error_msg)
+
+ return out
+
+
+def _get_heketi_server_version_str(ocp_client_node=None):
+ """Gets Heketi server package version from Heketi POD.
+
+ Args:
+ ocp_client_node (str): Node on which the version check command should
+ run.
+ Returns:
+ str : heketi version, i.e. '7.0.0-1'
+ Raises: 'exceptions.ExecutionError' if failed to get version
+ """
+ if not ocp_client_node:
+ ocp_client_node = g.config["ocp_servers"]["client"].keys()[0]
+ get_package_version_cmd = (
+ "rpm -q heketi --queryformat '%{version}-%{release}\n' | "
+ "cut -d '.' -f 1,2,3")
+
+ # NOTE(vponomar): we implement Heketi POD call command here, not in common
+ # module for OC commands just to avoid cross-reference imports.
+ get_pods_cmd = "oc get -o wide --no-headers=true pods --selector heketi"
+ heketi_pods = command.cmd_run(get_pods_cmd, hostname=ocp_client_node)
+
+ err_msg = ""
+ for heketi_pod_line in heketi_pods.split("\n"):
+ heketi_pod_data = heketi_pod_line.split()
+ if ("-deploy" in heketi_pod_data[0] or
+ heketi_pod_data[1].lower() != "1/1" or
+ heketi_pod_data[2].lower() != "running"):
+ continue
+ try:
+ pod_cmd = "oc exec %s -- %s" % (
+ heketi_pod_data[0], get_package_version_cmd)
+ return command.cmd_run(pod_cmd, hostname=ocp_client_node)
+ except Exception as e:
+ err = ("Failed to run '%s' command on '%s' Heketi POD. "
+ "Error: %s\n" % (pod_cmd, heketi_pod_data[0], e))
+ err_msg += err
+ g.log.error(err)
+ if not err_msg:
+ err_msg += "Haven't found 'Running' and 'ready' (1/1) Heketi PODs.\n"
+ err_msg += "Heketi PODs: %s" % heketi_pods
+ raise exceptions.ExecutionError(err_msg)
+
+
+def _parse_heketi_version(heketi_version_str):
+ """Parses Heketi version str into tuple of 4 values.
+
+ Args:
+ heketi_version_str (str): Heketi version like '7.0.0-1'
+ Returns:
+ Tuple object of 4 values - major, minor, micro and build version parts.
+ """
+ groups = re.findall(HEKETI_VERSION_RE, heketi_version_str)
+ err_msg = (
+ "Failed to parse '%s' str into 4 Heketi version parts. "
+ "Expected value like '7.0.0-1'" % heketi_version_str)
+ assert groups, err_msg
+ assert len(groups) == 1, err_msg
+ assert len(groups[0]) == 4, err_msg
+ return (int(groups[0][0]), int(groups[0][1]),
+ int(groups[0][2]), int(groups[0][3]))
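+
+# For example:
+#     _parse_heketi_version('7.0.0-1')  # -> (7, 0, 0, 1)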
+
+
+class HeketiVersion(object):
+ """Eases Heketi versions comparison.
+
+    An instance of this class can be compared to another instance of it or
+    to string-like objects.
+
+ Input str version is required to have 4 version parts -
+ 'major', 'minor', 'micro' and 'build' versions. Example - '7.0.0-1'
+
+ Usage example (1) - compare to string object:
+ version_7_0_0_2 = HeketiVersion('7.0.0-2')
+ cmp_result = '7.0.0-1' < version_7_0_0_2 <= '8.0.0-1'
+
+ Usage example (2) - compare to the same type of an object:
+ version_7_0_0_1 = HeketiVersion('7.0.0-1')
+ version_7_0_0_2 = HeketiVersion('7.0.0-2')
+ cmp_result = version_7_0_0_1 < version_7_0_0_2
+ """
+ def __init__(self, heketi_version_str):
+ self.v = _parse_heketi_version(heketi_version_str)
+ self.v_str = heketi_version_str
+ self.major, self.minor, self.micro, self.build = self.v
+
+ def __str__(self):
+ return self.v_str
+
+ def _adapt_other(self, other):
+ if isinstance(other, six.string_types):
+ return HeketiVersion(other)
+ elif isinstance(other, HeketiVersion):
+ return other
+ else:
+ raise NotImplementedError(
+ "'%s' type is not supported for Heketi version "
+ "comparison." % type(other))
+
+ def _compare_client_and_server_versions(self, client_v, server_v):
+ if client_v < server_v:
+ raise Exception(
+ "Client version (%s) is older than server's (%s)." % (
+ client_v, server_v))
+
+ def __lt__(self, other):
+ global HEKETI_CLIENT_VERSION
+ global HEKETI_SERVER_VERSION
+ self._compare_client_and_server_versions(
+ HEKETI_CLIENT_VERSION.v, HEKETI_SERVER_VERSION.v)
+ adapted_other = self._adapt_other(other)
+ return self.v < adapted_other.v
+
+ def __le__(self, other):
+ global HEKETI_CLIENT_VERSION
+ global HEKETI_SERVER_VERSION
+ self._compare_client_and_server_versions(
+ HEKETI_CLIENT_VERSION.v, HEKETI_SERVER_VERSION.v)
+ adapted_other = self._adapt_other(other)
+ return self.v <= adapted_other.v
+
+ def __eq__(self, other):
+ global HEKETI_CLIENT_VERSION
+ global HEKETI_SERVER_VERSION
+ self._compare_client_and_server_versions(
+ HEKETI_CLIENT_VERSION.v, HEKETI_SERVER_VERSION.v)
+ adapted_other = self._adapt_other(other)
+ return self.v == adapted_other.v
+
+ def __ge__(self, other):
+ global HEKETI_CLIENT_VERSION
+ global HEKETI_SERVER_VERSION
+ self._compare_client_and_server_versions(
+ HEKETI_CLIENT_VERSION.v, HEKETI_SERVER_VERSION.v)
+ adapted_other = self._adapt_other(other)
+ return self.v >= adapted_other.v
+
+ def __gt__(self, other):
+ global HEKETI_CLIENT_VERSION
+ global HEKETI_SERVER_VERSION
+ self._compare_client_and_server_versions(
+ HEKETI_CLIENT_VERSION.v, HEKETI_SERVER_VERSION.v)
+ adapted_other = self._adapt_other(other)
+ return self.v > adapted_other.v
+
+ def __ne__(self, other):
+ global HEKETI_CLIENT_VERSION
+ global HEKETI_SERVER_VERSION
+ self._compare_client_and_server_versions(
+ HEKETI_CLIENT_VERSION.v, HEKETI_SERVER_VERSION.v)
+ adapted_other = self._adapt_other(other)
+ return self.v != adapted_other.v
+
+
+def get_heketi_version(hostname=None, ocp_client_node=None):
+ """Cacher of the Heketi client package version.
+
+ Version of Heketi client package is constant value. So, we call API just
+ once and then reuse it's output.
+
+ Args:
+ hostname (str): a node with 'heketi' client where command should run on
+ If not specified, then first key
+ from 'openshift.heketi_config.heketi_client_node' config option
+ will be picked up.
+ ocp_client_node (str): a node with the 'oc' client,
+ where Heketi POD command will run.
+ If not specified, then first key
+ from 'ocp_servers.client' config option will be picked up.
+ Returns:
+ HeketiVersion object instance.
+ """
+ global HEKETI_CLIENT_VERSION
+ global HEKETI_SERVER_VERSION
+ if not (HEKETI_SERVER_VERSION and HEKETI_CLIENT_VERSION):
+ client_version_str = _get_heketi_client_version_str(hostname=hostname)
+ server_version_str = _get_heketi_server_version_str(
+ ocp_client_node=ocp_client_node)
+ HEKETI_CLIENT_VERSION = HeketiVersion(client_version_str)
+ HEKETI_SERVER_VERSION = HeketiVersion(server_version_str)
+ return HEKETI_SERVER_VERSION
diff --git a/openshift-storage-libs/openshiftstoragelibs/naming.py b/openshift-storage-libs/openshiftstoragelibs/naming.py
new file mode 100644
index 00000000..b44559ad
--- /dev/null
+++ b/openshift-storage-libs/openshiftstoragelibs/naming.py
@@ -0,0 +1,56 @@
+"""Helper functions for working with names for volumes, resources, etc.
+"""
+
+import string
+import random
+import re
+
+# we only use lowercase here because kubernetes requires
+# names to be lowercase or digits, so that is our default
+UNIQUE_CHARS = (string.lowercase + string.digits)
+
+
+def make_unique_label(prefix=None, suffix=None, sep='-',
+ clean=r'[^a-zA-Z0-9]+', unique_len=8,
+ unique_chars=UNIQUE_CHARS):
+ """Generate a unique name string based on an optional prefix,
+ suffix, and pseudo-random set of alphanumeric characters.
+
+ Args:
+ prefix (str): Start of the unique string.
+ suffix (str): End of the unique string.
+ sep (str): Separator string (between sections/invalid chars).
+        clean (str): Regular expression matching invalid chars
+            that will be replaced by `sep` if found in the prefix or suffix.
+ unique_len (int): Length of the unique part.
+ unique_chars (str): String representing the set of characters
+ the unique part will draw from.
+ Returns:
+ str: The uniqueish string.
+ """
+ cre = re.compile(clean)
+ parts = []
+ if prefix:
+ parts.append(cre.sub(sep, prefix))
+ parts.append(''.join(random.choice(unique_chars)
+ for _ in range(unique_len)))
+ if suffix:
+ parts.append(cre.sub(sep, suffix))
+ return sep.join(parts)
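+
+# Illustrative example; the random part differs between calls:
+#     make_unique_label(prefix='autotests_pvc')  # -> 'autotests-pvc-x7f3k2p9'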
+
+
+def extract_method_name(full_name, keep_class=False):
+ """Given a full test name as returned from TestCase.id() return
+ just the method part or class.method.
+
+ Args:
+ full_name (str): Dot separated name of test.
+        keep_class (bool): Retain the class name; if false, only the
+            method name will be returned.
+ Returns:
+ str: Method name or class.method_name.
+ """
+ offset = -1
+ if keep_class:
+ offset = -2
+ return '.'.join(full_name.split('.')[offset:])
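+
+# Illustrative examples:
+#     extract_method_name('tests.test_mod.TestCls.test_m')
+#     # -> 'test_m'
+#     extract_method_name('tests.test_mod.TestCls.test_m', keep_class=True)
+#     # -> 'TestCls.test_m'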
diff --git a/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py b/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py
new file mode 100644
index 00000000..295dc42b
--- /dev/null
+++ b/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py
@@ -0,0 +1,1507 @@
+"""Library for openshift operations.
+
+Various utility functions for interacting with OCP/OpenShift.
+"""
+
+import base64
+import json
+import re
+import types
+
+from glusto.core import Glusto as g
+from glustolibs.gluster import volume_ops
+import mock
+import yaml
+
+from openshiftstoragelibs import command
+from openshiftstoragelibs import exceptions
+from openshiftstoragelibs import openshift_version
+from openshiftstoragelibs import utils
+from openshiftstoragelibs import waiter
+from openshiftstoragelibs.heketi_ops import (
+ heketi_blockvolume_info,
+ heketi_volume_info,
+)
+
+PODS_WIDE_RE = re.compile(
+ r'(\S+)\s+(\S+)\s+(\w+)\s+(\d+)\s+(\S+)\s+(\S+)\s+(\S+).*\n')
+SERVICE_STATUS = "systemctl status %s"
+SERVICE_RESTART = "systemctl restart %s"
+SERVICE_STATUS_REGEX = r"Active: active \((.*)\) since .*;.*"
+
+
+def oc_get_pods(ocp_node, selector=None):
+ """Gets the pods info with 'wide' option in the current project.
+
+ Args:
+ ocp_node (str): Node in which ocp command will be executed.
+        selector (str): optional selector for OCP pods.
+ example: "glusterfs-node=pod" for filtering out only Gluster PODs.
+
+ Returns:
+ dict : dict of pods info in the current project.
+ """
+
+ cmd = "oc get -o wide --no-headers=true pods"
+ if selector:
+ cmd += " --selector %s" % selector
+ ret, out, err = g.run(ocp_node, cmd)
+ if ret != 0:
+ g.log.error("Failed to get ocp pods on node %s" % ocp_node)
+ raise AssertionError('failed to get pods: %r' % (err,))
+ return _parse_wide_pods_output(out)
+
+
+def _parse_wide_pods_output(output):
+ """Parse the output of `oc get -o wide pods`.
+ """
+ # Interestingly, the output of get pods is "cooked" in such a way that
+ # the values in the ready, status, & restart fields are not accessible
+ # from YAML/JSON/templating forcing us to scrape the output for
+ # these values
+ # (at the time of this writing the logic is in
+ # printPodBase in kubernetes/pkg/printers/internalversion/printers.go )
+ # Possibly obvious, but if you don't need those values you can
+ # use the YAML output directly.
+ #
+ # TODO: Add unit tests for this parser
+ pods_info = {}
+ for each_pod_info in PODS_WIDE_RE.findall(output):
+ pods_info[each_pod_info[0]] = {
+ 'ready': each_pod_info[1],
+ 'status': each_pod_info[2],
+ 'restarts': each_pod_info[3],
+ 'age': each_pod_info[4],
+ 'ip': each_pod_info[5],
+ 'node': each_pod_info[6],
+ }
+ return pods_info
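+
+# Illustrative input/output (values are hypothetical): the line
+#     'heketi-1-abcde 1/1 Running 0 2d 10.0.0.5 node1.example.com'
+# is parsed into
+#     {'heketi-1-abcde': {'ready': '1/1', 'status': 'Running',
+#                         'restarts': '0', 'age': '2d', 'ip': '10.0.0.5',
+#                         'node': 'node1.example.com'}}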
+
+
+def oc_get_pods_full(ocp_node):
+ """Gets all the pod info via YAML in the current project.
+
+ Args:
+ ocp_node (str): Node in which ocp command will be executed.
+
+ Returns:
+ dict: The YAML output converted to python objects
+ (a top-level dict)
+ """
+
+ cmd = "oc get -o yaml pods"
+ ret, out, err = g.run(ocp_node, cmd)
+ if ret != 0:
+ g.log.error("Failed to get ocp pods on node %s" % ocp_node)
+ raise AssertionError('failed to get pods: %r' % (err,))
+ return yaml.load(out)
+
+
+def get_ocp_gluster_pod_names(ocp_node):
+ """Gets the gluster pod names in the current project.
+
+ Args:
+ ocp_node (str): Node in which ocp command will be executed.
+
+ Returns:
+ list : list of gluster pod names in the current project.
+ Empty list, if there are no gluster pods.
+
+ Example:
+ get_ocp_gluster_pod_names(ocp_node)
+ """
+
+ pod_names = oc_get_pods(ocp_node).keys()
+ return [pod for pod in pod_names if pod.startswith('glusterfs-')]
+
+
+def get_amount_of_gluster_nodes(ocp_node):
+ """Calculate amount of Gluster nodes.
+
+ Args:
+ ocp_node (str): node to run 'oc' commands on.
+ Returns:
+        Integer value as amount of either Gluster PODs or Gluster nodes.
+ """
+ # Containerized Gluster
+ gluster_pods = get_ocp_gluster_pod_names(ocp_node)
+ if gluster_pods:
+ return len(gluster_pods)
+
+ # Standalone Gluster
+ configured_gluster_nodes = len(g.config.get("gluster_servers", {}))
+ if configured_gluster_nodes:
+ return configured_gluster_nodes
+
+ raise exceptions.ConfigError(
+ "Haven't found neither Gluster PODs nor Gluster nodes.")
+
+
+def switch_oc_project(ocp_node, project_name):
+ """Switch to the given project.
+
+ Args:
+ ocp_node (str): Node in which ocp command will be executed.
+ project_name (str): Project name.
+ Returns:
+ bool : True on switching to given project.
+ False otherwise
+
+ Example:
+ switch_oc_project(ocp_node, "storage-project")
+ """
+
+ cmd = "oc project %s" % project_name
+ ret, _, _ = g.run(ocp_node, cmd)
+ if ret != 0:
+ g.log.error("Failed to switch to project %s" % project_name)
+ return False
+ return True
+
+
+def oc_rsync(ocp_node, pod_name, src_dir_path, dest_dir_path):
+ """Sync file from 'src_dir_path' path on ocp_node to
+ 'dest_dir_path' path on 'pod_name' using 'oc rsync' command.
+
+ Args:
+ ocp_node (str): Node on which oc rsync command will be executed
+ pod_name (str): Name of the pod on which source directory to be
+ mounted
+ src_dir_path (path): Source path from which directory to be mounted
+ dest_dir_path (path): destination path to which directory to be
+ mounted
+ """
+ ret, out, err = g.run(ocp_node, ['oc',
+ 'rsync',
+ src_dir_path,
+ '%s:%s' % (pod_name, dest_dir_path)])
+ if ret != 0:
+ error_msg = 'failed to sync directory in pod: %r; %r' % (out, err)
+ g.log.error(error_msg)
+ raise AssertionError(error_msg)
+
+
+def oc_rsh(ocp_node, pod_name, command, log_level=None):
+ """Run a command in the ocp pod using `oc rsh`.
+
+ Args:
+ ocp_node (str): Node on which oc rsh command will be executed.
+ pod_name (str): Name of the pod on which the command will
+ be executed.
+ command (str|list): command to run.
+ log_level (str|None): log level to be passed to glusto's run
+ method.
+
+ Returns:
+ A tuple consisting of the command return code, stdout, and stderr.
+ """
+ prefix = ['oc', 'rsh', pod_name]
+ if isinstance(command, types.StringTypes):
+ cmd = ' '.join(prefix + [command])
+ else:
+ cmd = prefix + command
+
+ # unpack the tuple to make sure our return value exactly matches
+ # our docstring
+ ret, stdout, stderr = g.run(ocp_node, cmd, log_level=log_level)
+ return (ret, stdout, stderr)
+
+
+def oc_create(ocp_node, value, value_type='file'):
+ """Create a resource based on the contents of the given file name.
+
+ Args:
+ ocp_node (str): Node on which the ocp command will run
+ value (str): Filename (on remote) or file data
+ to be passed to oc create command.
+ value_type (str): either 'file' or 'stdin'.
+ Raises:
+ AssertionError: Raised when resource fails to create.
+ """
+ if value_type == 'file':
+ cmd = ['oc', 'create', '-f', value]
+ else:
+ cmd = ['echo', '\'%s\'' % value, '|', 'oc', 'create', '-f', '-']
+ ret, out, err = g.run(ocp_node, cmd)
+ if ret != 0:
+ msg = 'Failed to create resource: %r; %r' % (out, err)
+ g.log.error(msg)
+ raise AssertionError(msg)
+ g.log.info('Created resource from %s.' % value_type)
+
+
+def oc_process(ocp_node, params, filename):
+ """Create a resource template based on the contents of the
+ given filename and params provided.
+ Args:
+ ocp_node (str): Node on which the ocp command will run
+ filename (str): Filename (on remote) to be passed to
+ oc process command.
+ Returns: template generated through process command
+ Raises:
+ AssertionError: Raised when resource fails to create.
+ """
+
+ ret, out, err = g.run(ocp_node, ['oc', 'process', '-f', filename, params])
+ if ret != 0:
+        error_msg = 'failed to process template: %r; %r' % (out, err)
+ g.log.error(error_msg)
+ raise AssertionError(error_msg)
+ g.log.info('Created resource from file (%s)', filename)
+
+ return out
+
+
+def oc_create_secret(hostname, secret_name_prefix="autotests-secret-",
+ namespace="default",
+ data_key="password",
+ secret_type="kubernetes.io/glusterfs"):
+ """Create secret using data provided as stdin input.
+
+ Args:
+ hostname (str): Node on which 'oc create' command will be executed.
+ secret_name_prefix (str): secret name will consist of this prefix and
+ random str.
+ namespace (str): name of a namespace to create a secret in
+ data_key (str): plain text value for secret which will be transformed
+ into base64 string automatically.
+ secret_type (str): type of the secret, which will be created.
+ Returns: name of a secret
+ """
+ secret_name = "%s-%s" % (secret_name_prefix, utils.get_random_str())
+ secret_data = json.dumps({
+ "apiVersion": "v1",
+ "data": {"key": base64.b64encode(data_key)},
+ "kind": "Secret",
+ "metadata": {
+ "name": secret_name,
+ "namespace": namespace,
+ },
+ "type": secret_type,
+ })
+ oc_create(hostname, secret_data, 'stdin')
+ return secret_name
+
+
+def oc_create_sc(hostname, sc_name_prefix="autotests-sc",
+ provisioner="kubernetes.io/glusterfs",
+ allow_volume_expansion=False,
+ reclaim_policy="Delete", **parameters):
+ """Create storage class using data provided as stdin input.
+
+ Args:
+ hostname (str): Node on which 'oc create' command will be executed.
+ sc_name_prefix (str): sc name will consist of this prefix and
+ random str.
+ provisioner (str): name of the provisioner
+        allow_volume_expansion (bool): Set it to True if you need to allow
+            volume expansion.
+        reclaim_policy (str): reclaim policy for the storage class,
+            'Delete' by default.
+    Kwargs:
+ All the keyword arguments are expected to be key and values of
+ 'parameters' section for storage class.
+ """
+ allowed_parameters = (
+ 'resturl', 'secretnamespace', 'restuser', 'secretname',
+ 'restauthenabled', 'restsecretnamespace', 'restsecretname',
+ 'hacount', 'clusterids', 'chapauthenabled', 'volumenameprefix',
+ 'volumeoptions', 'volumetype'
+ )
+ for parameter in parameters.keys():
+ if parameter.lower() not in allowed_parameters:
+ parameters.pop(parameter)
+ sc_name = "%s-%s" % (sc_name_prefix, utils.get_random_str())
+ sc_data = json.dumps({
+ "kind": "StorageClass",
+ "apiVersion": "storage.k8s.io/v1",
+ "metadata": {"name": sc_name},
+ "provisioner": provisioner,
+ "reclaimPolicy": reclaim_policy,
+ "parameters": parameters,
+ "allowVolumeExpansion": allow_volume_expansion,
+ })
+ oc_create(hostname, sc_data, 'stdin')
+ return sc_name
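+
+# Usage sketch (resturl and secret values below are hypothetical); any
+# parameter not in the allowed list above is silently dropped:
+#
+#     sc_name = oc_create_sc(
+#         "ocp-master.example.com",
+#         resturl="http://heketi.example.com:8080", restuser="admin",
+#         secretname="heketi-secret", secretnamespace="default")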
+
+
+def oc_create_pvc(hostname, sc_name=None, pvc_name_prefix="autotests-pvc",
+ pvc_size=1):
+ """Create PVC using data provided as stdin input.
+
+ Args:
+ hostname (str): Node on which 'oc create' command will be executed.
+ sc_name (str): name of a storage class to create PVC in.
+ pvc_name_prefix (str): PVC name will consist of this prefix and
+ random str.
+        pvc_size (int/str): size of PVC in Gi
+ """
+ pvc_name = "%s-%s" % (pvc_name_prefix, utils.get_random_str())
+ metadata = {"name": pvc_name}
+ if sc_name:
+ metadata["annotations"] = {
+ "volume.kubernetes.io/storage-class": sc_name,
+ "volume.beta.kubernetes.io/storage-class": sc_name,
+ }
+ pvc_data = json.dumps({
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": metadata,
+ "spec": {
+ "accessModes": ["ReadWriteOnce"],
+ "resources": {"requests": {"storage": "%sGi" % pvc_size}}
+ },
+ })
+ oc_create(hostname, pvc_data, 'stdin')
+ return pvc_name
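+
+# Usage sketch continuing the hypothetical storage class example above:
+#
+#     pvc_name = oc_create_pvc("ocp-master.example.com", sc_name, pvc_size=2)
+#     verify_pvc_status_is_bound("ocp-master.example.com", pvc_name)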
+
+
+def oc_create_app_dc_with_io(
+ hostname, pvc_name, dc_name_prefix="autotests-dc-with-app-io",
+ replicas=1, space_to_use=1048576):
+ """Create DC with app PODs and attached PVC, constantly running I/O.
+
+ Args:
+ hostname (str): Node on which 'oc create' command will be executed.
+ pvc_name (str): name of the Persistent Volume Claim to attach to
+ the application PODs where constant I/O will run.
+ dc_name_prefix (str): DC name will consist of this prefix and
+ random str.
+ replicas (int): amount of application POD replicas.
+ space_to_use (int): value in bytes which will be used for I/O.
+ """
+ dc_name = "%s-%s" % (dc_name_prefix, utils.get_random_str())
+ container_data = {
+ "name": dc_name,
+ "image": "cirros",
+ "volumeMounts": [{"mountPath": "/mnt", "name": dc_name}],
+ "command": ["sh"],
+ "args": [
+ "-ec",
+ "trap \"rm -f /mnt/random-data-$HOSTNAME.log ; exit 0\" SIGTERM; "
+ "while true; do "
+ " (mount | grep '/mnt') && "
+ " (head -c %s < /dev/urandom > /mnt/random-data-$HOSTNAME.log) ||"
+ " exit 1; "
+ " sleep 1 ; "
+ "done" % space_to_use,
+ ],
+ "livenessProbe": {
+ "initialDelaySeconds": 3,
+ "periodSeconds": 3,
+ "exec": {"command": [
+ "sh", "-ec",
+ "mount | grep '/mnt' && "
+ " head -c 1 < /dev/urandom >> /mnt/random-data-$HOSTNAME.log"
+ ]},
+ },
+ }
+ dc_data = json.dumps({
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {"name": dc_name},
+ "spec": {
+ "replicas": replicas,
+ "triggers": [{"type": "ConfigChange"}],
+ "paused": False,
+ "revisionHistoryLimit": 2,
+ "template": {
+ "metadata": {"labels": {"name": dc_name}},
+ "spec": {
+ "restartPolicy": "Always",
+ "volumes": [{
+ "name": dc_name,
+ "persistentVolumeClaim": {"claimName": pvc_name},
+ }],
+ "containers": [container_data],
+ "terminationGracePeriodSeconds": 20,
+ }
+ }
+ }
+ })
+ oc_create(hostname, dc_data, 'stdin')
+ return dc_name
+
+
+def oc_create_tiny_pod_with_volume(hostname, pvc_name, pod_name_prefix='',
+ mount_path='/mnt'):
+ """Create tiny POD from image in 10Mb with attached volume at /mnt"""
+ pod_name = "%s-%s" % (pod_name_prefix, utils.get_random_str())
+ pod_data = json.dumps({
+ "apiVersion": "v1",
+ "kind": "Pod",
+ "metadata": {
+ "name": pod_name,
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 20,
+ "containers": [{
+ "name": pod_name,
+ "image": "cirros", # noqa: 10 Mb! linux image
+ "volumeMounts": [{"mountPath": mount_path, "name": "vol"}],
+ "command": [
+ "/bin/sh", "-ec",
+ "trap 'exit 0' SIGTERM ; "
+ "while :; do echo '.'; sleep 5 ; done",
+ ]
+ }],
+ "volumes": [{
+ "name": "vol",
+ "persistentVolumeClaim": {"claimName": pvc_name},
+ }],
+ "restartPolicy": "Never",
+ }
+ })
+ oc_create(hostname, pod_data, 'stdin')
+ return pod_name
+
+
+def oc_delete(ocp_node, rtype, name, raise_on_absence=True):
+ """Delete an OCP resource by name.
+
+ Args:
+ ocp_node (str): Node on which the ocp command will run.
+ rtype (str): Name of the resource type (pod, storageClass, etc).
+ name (str): Name of the resource to delete.
+        raise_on_absence (bool): whether to raise an exception if the
+            resource is absent, else just return.
+            Default value: True
+ """
+ if not oc_get_yaml(ocp_node, rtype, name,
+ raise_on_error=raise_on_absence):
+ return
+ cmd = ['oc', 'delete', rtype, name]
+ if openshift_version.get_openshift_version() >= '3.11':
+ cmd.append('--wait=false')
+
+ ret, out, err = g.run(ocp_node, cmd)
+ if ret != 0:
+ g.log.error('Failed to delete resource: %s, %s: %r; %r',
+ rtype, name, out, err)
+ raise AssertionError('failed to delete resource: %r; %r' % (out, err))
+ g.log.info('Deleted resource: %r %r', rtype, name)
+
+
+def oc_get_custom_resource(ocp_node, rtype, custom, name=None, selector=None,
+ raise_on_error=True):
+ """Get an OCP resource by custom column names.
+
+ Args:
+ ocp_node (str): Node on which the ocp command will run.
+ rtype (str): Name of the resource type (pod, storageClass, etc).
+        custom (str): Name of the custom column to fetch.
+        name (str|None): Name of the resource to fetch.
+        selector (str|list|None): Label selector or list of label
+            selectors to filter resources by.
+        raise_on_error (bool): If set to true a failure to fetch
+            resource information will raise an error, otherwise
+            an empty list will be returned.
+ Returns:
+        list: List containing data about the resource's custom columns
+ Raises:
+ AssertionError: Raised when unable to get resource and
+ `raise_on_error` is true.
+ Example:
+ Get all "pvc" with "metadata.name" parameter values:
+ pvc_details = oc_get_custom_resource(
+ ocp_node, "pvc", ":.metadata.name"
+ )
+ """
+ cmd = ['oc', 'get', rtype, '--no-headers']
+
+ cmd.append('-o=custom-columns=%s' % (
+ ','.join(custom) if isinstance(custom, list) else custom))
+
+ if selector:
+ cmd.append('--selector %s' % (
+ ','.join(selector) if isinstance(selector, list) else selector))
+
+ if name:
+ cmd.append(name)
+
+ ret, out, err = g.run(ocp_node, cmd)
+ if ret != 0:
+ g.log.error('Failed to get %s: %s: %r', rtype, name, err)
+ if raise_on_error:
+ raise AssertionError('failed to get %s: %s: %r'
+ % (rtype, name, err))
+ return []
+
+ if name:
+ return filter(None, map(str.strip, (out.strip()).split(' ')))
+ else:
+ out_list = []
+ for line in (out.strip()).split('\n'):
+ out_list.append(filter(None, map(str.strip, line.split(' '))))
+ return out_list
+
+
+def oc_get_yaml(ocp_node, rtype, name=None, raise_on_error=True):
+ """Get an OCP resource by name.
+
+ Args:
+ ocp_node (str): Node on which the ocp command will run.
+ rtype (str): Name of the resource type (pod, storageClass, etc).
+ name (str|None): Name of the resource to fetch.
+ raise_on_error (bool): If set to true a failure to fetch
+            resource information will raise an error, otherwise
+ an empty dict will be returned.
+ Returns:
+        dict: Dictionary containing data about the resource
+ Raises:
+ AssertionError: Raised when unable to get resource and
+ `raise_on_error` is true.
+ """
+ cmd = ['oc', 'get', '-oyaml', rtype]
+ if name is not None:
+ cmd.append(name)
+ ret, out, err = g.run(ocp_node, cmd)
+ if ret != 0:
+ g.log.error('Failed to get %s: %s: %r', rtype, name, err)
+ if raise_on_error:
+ raise AssertionError('failed to get %s: %s: %r'
+ % (rtype, name, err))
+ return {}
+ return yaml.load(out)
+
+
+def oc_get_pvc(ocp_node, name):
+ """Get information on a persistant volume claim.
+
+ Args:
+ ocp_node (str): Node on which the ocp command will run.
+ name (str): Name of the PVC.
+ Returns:
+        dict: Dictionary containing data about the PVC.
+ """
+ return oc_get_yaml(ocp_node, 'pvc', name)
+
+
+def oc_get_pv(ocp_node, name):
+ """Get information on a persistant volume.
+
+ Args:
+ ocp_node (str): Node on which the ocp command will run.
+ name (str): Name of the PV.
+ Returns:
+        dict: Dictionary containing data about the PV.
+ """
+ return oc_get_yaml(ocp_node, 'pv', name)
+
+
+def oc_get_all_pvs(ocp_node):
+ """Get information on all persistent volumes.
+
+ Args:
+ ocp_node (str): Node on which the ocp command will run.
+ Returns:
+        dict: Dictionary containing data about all PVs.
+ """
+ return oc_get_yaml(ocp_node, 'pv', None)
+
+
+def create_namespace(hostname, namespace):
+ '''
+    This function creates a namespace
+ Args:
+ hostname (str): hostname on which we need to
+ create namespace
+ namespace (str): project name
+ Returns:
+        bool: True if the namespace was created or already exists,
+              otherwise False
+ '''
+ cmd = "oc new-project %s" % namespace
+ ret, out, err = g.run(hostname, cmd, "root")
+ if ret == 0:
+ g.log.info("new namespace %s successfully created" % namespace)
+ return True
+ output = out.strip().split("\n")[0]
+ if "already exists" in output:
+ g.log.info("namespace %s already exists" % namespace)
+ return True
+ g.log.error("failed to create namespace %s" % namespace)
+ return False
+
+
+def wait_for_resource_absence(ocp_node, rtype, name,
+ interval=5, timeout=300):
+    """Wait for absence of an OCP resource; raises ExecutionError on timeout."""
+    _waiter = waiter.Waiter(timeout=timeout, interval=interval)
+ resource, pv_name = None, None
+ for w in _waiter:
+ try:
+ resource = oc_get_yaml(ocp_node, rtype, name, raise_on_error=True)
+ except AssertionError:
+ break
+ if rtype == 'pvc':
+ cmd = "oc get pv -o=custom-columns=:.spec.claimRef.name | grep %s" % (
+ name)
+ for w in _waiter:
+ ret, out, err = g.run(ocp_node, cmd, "root")
+ _pv_name = out.strip()
+ if _pv_name and not pv_name:
+ pv_name = _pv_name
+ if ret != 0:
+ break
+ if w.expired:
+ # Gather more info for ease of debugging
+ try:
+ r_events = get_events(ocp_node, obj_name=name)
+ except Exception:
+ r_events = '?'
+ error_msg = (
+ "%s '%s' still exists after waiting for it %d seconds.\n"
+ "Resource info: %s\n"
+ "Resource related events: %s" % (
+ rtype, name, timeout, resource, r_events))
+ if rtype == 'pvc' and pv_name:
+ try:
+ pv_events = get_events(ocp_node, obj_name=pv_name)
+ except Exception:
+ pv_events = '?'
+ error_msg += "\nPV events: %s" % pv_events
+
+ g.log.error(error_msg)
+ raise exceptions.ExecutionError(error_msg)
+
+
+def scale_dc_pod_amount_and_wait(hostname, dc_name,
+ pod_amount=1, namespace=None):
+ """Scale amount of PODs for a DC.
+
+    If pod_amount is 0, then wait for its absence.
+    If pod_amount >= 1, then wait for all of a DC's PODs to be ready.
+
+ Args:
+ hostname (str): Node on which the ocp command will run
+        dc_name (str): Name of the DC to scale.
+ pod_amount (int): Number of PODs to scale. Default is 1.
+ namespace (str): Namespace of a DC.
+ """
+ namespace_arg = "--namespace=%s" % namespace if namespace else ""
+ scale_cmd = "oc scale --replicas=%d dc/%s %s" % (
+ pod_amount, dc_name, namespace_arg)
+ command.cmd_run(scale_cmd, hostname=hostname)
+
+ pod_names = get_pod_names_from_dc(hostname, dc_name)
+ for pod_name in pod_names:
+ if pod_amount == 0:
+ wait_for_resource_absence(hostname, 'pod', pod_name)
+ else:
+ wait_for_pod_be_ready(hostname, pod_name)
+ return pod_names
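+
+# Usage sketch (hypothetical host and DC name): scale down to zero waiting
+# for POD absence, then back up waiting for POD readiness:
+#
+#     scale_dc_pod_amount_and_wait("ocp-master.example.com", "heketi", 0)
+#     scale_dc_pod_amount_and_wait("ocp-master.example.com", "heketi", 1)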
+
+
+def get_gluster_pod_names_by_pvc_name(ocp_node, pvc_name):
+ """Get Gluster POD names, whose nodes store bricks for specified PVC.
+
+ Args:
+ ocp_node (str): Node to execute OCP commands on.
+ pvc_name (str): Name of a PVC to get related Gluster PODs.
+ Returns:
+ list: List of dicts, which consist of following 3 key-value pairs:
+ pod_name=<pod_name_value>,
+ host_name=<host_name_value>,
+ host_ip=<host_ip_value>
+ """
+ # Check storage provisioner
+ sp_cmd = (
+ r'oc get pvc %s --no-headers -o=custom-columns='
+ r':.metadata.annotations."volume\.beta\.kubernetes\.io\/'
+ r'storage\-provisioner"' % pvc_name)
+ sp_raw = command.cmd_run(sp_cmd, hostname=ocp_node)
+ sp = sp_raw.strip()
+
+ # Get node IPs
+ if sp == "kubernetes.io/glusterfs":
+ pv_info = get_gluster_vol_info_by_pvc_name(ocp_node, pvc_name)
+ gluster_pod_nodes_ips = [
+ brick["name"].split(":")[0]
+ for brick in pv_info["bricks"]["brick"]
+ ]
+ elif sp == "gluster.org/glusterblock":
+ get_gluster_pod_node_ip_cmd = (
+ r"""oc get pv --template '{{range .items}}""" +
+ r"""{{if eq .spec.claimRef.name "%s"}}""" +
+ r"""{{.spec.iscsi.targetPortal}}{{" "}}""" +
+ r"""{{.spec.iscsi.portals}}{{end}}{{end}}'""") % (
+ pvc_name)
+ node_ips_raw = command.cmd_run(
+ get_gluster_pod_node_ip_cmd, hostname=ocp_node)
+ node_ips_raw = node_ips_raw.replace(
+ "[", " ").replace("]", " ").replace(",", " ")
+ gluster_pod_nodes_ips = [
+ s.strip() for s in node_ips_raw.split(" ") if s.strip()
+ ]
+ else:
+ assert False, "Unexpected storage provisioner: %s" % sp
+
+ # Get node names
+ get_node_names_cmd = (
+ "oc get node -o wide | grep -e '%s ' | awk '{print $1}'" % (
+ " ' -e '".join(gluster_pod_nodes_ips)))
+ gluster_pod_node_names = command.cmd_run(
+ get_node_names_cmd, hostname=ocp_node)
+ gluster_pod_node_names = [
+ node_name.strip()
+ for node_name in gluster_pod_node_names.split("\n")
+ if node_name.strip()
+ ]
+ node_count = len(gluster_pod_node_names)
+ err_msg = "Expected more than one node hosting Gluster PODs. Got '%s'." % (
+ node_count)
+ assert (node_count > 1), err_msg
+
+ # Get Gluster POD names which are located on the filtered nodes
+ get_pod_name_cmd = (
+ "oc get pods --all-namespaces "
+ "-o=custom-columns=:.metadata.name,:.spec.nodeName,:.status.hostIP | "
+ "grep 'glusterfs-' | grep -e '%s '" % "' -e '".join(
+ gluster_pod_node_names)
+ )
+ out = command.cmd_run(
+ get_pod_name_cmd, hostname=ocp_node)
+ data = []
+ for line in out.split("\n"):
+ pod_name, host_name, host_ip = [
+ el.strip() for el in line.split(" ") if el.strip()]
+ data.append({
+ "pod_name": pod_name,
+ "host_name": host_name,
+ "host_ip": host_ip,
+ })
+ pod_count = len(data)
+ err_msg = "Expected 3 or more Gluster PODs to be found. Actual is '%s'" % (
+ pod_count)
+ assert (pod_count > 2), err_msg
+ return data
+
+
+def cmd_run_on_gluster_pod_or_node(ocp_client_node, cmd, gluster_node=None):
+ """Run shell command on either Gluster PODs or Gluster nodes.
+
+ Args:
+ ocp_client_node (str): Node to execute OCP commands on.
+ cmd (str): shell command to run.
+        gluster_node (str): optional. Allows to choose a specific gluster
+            node, keeping abstraction from deployment type. Can be either
+            IP address
+ keeping abstraction from deployment type. Can be either IP address
+ or node name from "oc get nodes" command.
+ Returns:
+ Output of a shell command as string object.
+ """
+ # Containerized Glusterfs
+ gluster_pods = oc_get_pods(ocp_client_node, selector="glusterfs-node=pod")
+ err_msg = ""
+ if gluster_pods:
+ if gluster_node:
+ for pod_name, pod_data in gluster_pods.items():
+ if gluster_node in (pod_data["ip"], pod_data["node"]):
+ gluster_pod_names = [pod_name]
+ break
+ else:
+ raise exceptions.ExecutionError(
+ "Could not find Gluster PODs with node filter as "
+ "'%s'." % gluster_node)
+ else:
+ gluster_pod_names = gluster_pods.keys()
+
+ for gluster_pod_name in gluster_pod_names:
+ try:
+ pod_cmd = "oc exec %s -- %s" % (gluster_pod_name, cmd)
+ return command.cmd_run(pod_cmd, hostname=ocp_client_node)
+ except Exception as e:
+ err = ("Failed to run '%s' command on '%s' Gluster POD. "
+ "Error: %s\n" % (cmd, gluster_pod_name, e))
+ err_msg += err
+ g.log.error(err)
+ raise exceptions.ExecutionError(err_msg)
+
+ # Standalone Glusterfs
+ if gluster_node:
+ g_hosts = [gluster_node]
+ else:
+ g_hosts = g.config.get("gluster_servers", {}).keys()
+ for g_host in g_hosts:
+ try:
+ return command.cmd_run(cmd, hostname=g_host)
+ except Exception as e:
+ err = ("Failed to run '%s' command on '%s' Gluster node. "
+ "Error: %s\n" % (cmd, g_host, e))
+ err_msg += err
+ g.log.error(err)
+
+ if not err_msg:
+ raise exceptions.ExecutionError(
+ "Haven't found neither Gluster PODs nor Gluster nodes.")
+ raise exceptions.ExecutionError(err_msg)
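+
+# Usage sketch (hypothetical host); the same call works for both
+# containerized and standalone Gluster deployments:
+#
+#     out = cmd_run_on_gluster_pod_or_node(
+#         "ocp-master.example.com", "gluster volume list")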
+
+
+def get_gluster_vol_info_by_pvc_name(ocp_node, pvc_name):
+ """Get Gluster volume info based on the PVC name.
+
+ Args:
+ ocp_node (str): Node to execute OCP commands on.
+ pvc_name (str): Name of a PVC to get bound Gluster volume info.
+ Returns:
+        dict: Dictionary containing data about a Gluster volume.
+ """
+
+ # Get PV ID from PVC
+ get_pvc_cmd = "oc get pvc %s -o=custom-columns=:.spec.volumeName" % (
+ pvc_name)
+ pv_name = command.cmd_run(get_pvc_cmd, hostname=ocp_node)
+ assert pv_name, "PV name should not be empty: '%s'" % pv_name
+
+ # Get volume ID from PV
+ get_pv_cmd = "oc get pv %s -o=custom-columns=:.spec.glusterfs.path" % (
+ pv_name)
+ vol_id = command.cmd_run(get_pv_cmd, hostname=ocp_node)
+ assert vol_id, "Gluster volume ID should not be empty: '%s'" % vol_id
+
+ vol_info_cmd = "gluster v info %s --xml" % vol_id
+ vol_info = cmd_run_on_gluster_pod_or_node(ocp_node, vol_info_cmd)
+
+ # Parse XML output to python dict
+ with mock.patch('glusto.core.Glusto.run', return_value=(0, vol_info, '')):
+ vol_info = volume_ops.get_volume_info(vol_id)
+ vol_info = vol_info[list(vol_info.keys())[0]]
+ vol_info["gluster_vol_id"] = vol_id
+ return vol_info
+
+
+def get_gluster_blockvol_info_by_pvc_name(ocp_node, heketi_server_url,
+ pvc_name):
+ """Get Gluster block volume info based on the PVC name.
+
+ Args:
+ ocp_node (str): Node to execute OCP commands on.
+ heketi_server_url (str): Heketi server url
+ pvc_name (str): Name of a PVC to get bound Gluster block volume info.
+ Returns:
+        dict: Dictionary containing data about a Gluster block volume.
+ """
+
+ # Get block volume Name and ID from PV which is bound to our PVC
+ get_block_vol_data_cmd = (
+ r'oc get pv --no-headers -o custom-columns='
+ r':.metadata.annotations.glusterBlockShare,'
+ r':.metadata.annotations."gluster\.org\/volume\-id",'
+ r':.spec.claimRef.name | grep "%s"' % pvc_name)
+ out = command.cmd_run(get_block_vol_data_cmd, hostname=ocp_node)
+ parsed_out = filter(None, map(str.strip, out.split(" ")))
+ assert len(parsed_out) == 3, "Expected 3 fields in following: %s" % out
+ block_vol_name, block_vol_id = parsed_out[:2]
+
+ # Get block hosting volume ID
+ block_hosting_vol_id = heketi_blockvolume_info(
+ ocp_node, heketi_server_url, block_vol_id, json=True
+ )["blockhostingvolume"]
+
+    # Get block hosting volume name by its ID
+ block_hosting_vol_name = heketi_volume_info(
+ ocp_node, heketi_server_url, block_hosting_vol_id, json=True)['name']
+
+ # Get Gluster block volume info
+ vol_info_cmd = "gluster-block info %s/%s --json" % (
+ block_hosting_vol_name, block_vol_name)
+ vol_info = cmd_run_on_gluster_pod_or_node(ocp_node, vol_info_cmd)
+
+ return json.loads(vol_info)
+
+
+def wait_for_pod_be_ready(hostname, pod_name,
+ timeout=1200, wait_step=60):
+ '''
+    This function waits for a pod to reach the ready state
+    Args:
+        hostname (str): hostname on which we want to check the pod status
+        pod_name (str): pod_name for which we need the status
+        timeout (int): timeout value,
+            default value is 1200 sec
+        wait_step (int): wait step,
+            default value is 60 sec
+    Returns:
+        bool: True if the pod is Running and in the ready state,
+        otherwise raises an Exception
+ '''
+ for w in waiter.Waiter(timeout, wait_step):
+ # command to find pod status and its phase
+ cmd = ("oc get pods %s -o=custom-columns="
+ ":.status.containerStatuses[0].ready,"
+ ":.status.phase") % pod_name
+ ret, out, err = g.run(hostname, cmd, "root")
+ if ret != 0:
+ msg = "Failed to execute cmd: %s\nout: %s\nerr: %s" % (
+ cmd, out, err)
+ g.log.error(msg)
+ raise exceptions.ExecutionError(msg)
+ output = out.strip().split()
+
+ # command to find if pod is ready
+ if output[0] == "true" and output[1] == "Running":
+ g.log.info("pod %s is in ready state and is "
+ "Running" % pod_name)
+ return True
+ elif output[1] == "Error":
+ msg = ("pod %s status error" % pod_name)
+ g.log.error(msg)
+ raise exceptions.ExecutionError(msg)
+ else:
+ g.log.info("pod %s ready state is %s,"
+ " phase is %s,"
+ " sleeping for %s sec" % (
+ pod_name, output[0],
+ output[1], wait_step))
+ continue
+ if w.expired:
+ err_msg = ("exceeded timeout %s for waiting for pod %s "
+ "to be in ready state" % (timeout, pod_name))
+ g.log.error(err_msg)
+ raise exceptions.ExecutionError(err_msg)
+
+
+def get_pod_names_from_dc(hostname, dc_name, timeout=180, wait_step=3):
+ """Return list of POD names by their DC.
+
+ Args:
+ hostname (str): hostname on which 'oc' commands will be executed.
+        dc_name (str): deployment config name
+        timeout (int): timeout value. Default value is 180 sec.
+        wait_step (int): Wait step, default value is 3 sec.
+ Returns:
+ list: list of strings which are POD names
+ Raises: exceptions.ExecutionError
+ """
+ get_replicas_amount_cmd = (
+ "oc get dc --no-headers --all-namespaces "
+ "-o=custom-columns=:.spec.replicas,:.metadata.name "
+ "| grep '%s' | awk '{print $1}'" % dc_name)
+ replicas = int(command.cmd_run(
+ get_replicas_amount_cmd, hostname=hostname))
+
+ get_pod_names_cmd = (
+ "oc get pods --all-namespaces -o=custom-columns=:.metadata.name "
+ "--no-headers=true --selector deploymentconfig=%s" % dc_name)
+ for w in waiter.Waiter(timeout, wait_step):
+ out = command.cmd_run(get_pod_names_cmd, hostname=hostname)
+ pod_names = [o.strip() for o in out.split('\n') if o.strip()]
+ if len(pod_names) != replicas:
+ continue
+ g.log.info(
+ "POD names for '%s' DC are '%s'. "
+ "Expected amount of PODs is '%s'.", dc_name, out, replicas)
+ return pod_names
+ if w.expired:
+ err_msg = ("Exceeded %s sec timeout waiting for PODs to appear "
+ "in amount of %s." % (timeout, replicas))
+ g.log.error(err_msg)
+ raise exceptions.ExecutionError(err_msg)
+
+
+def get_pod_name_from_dc(hostname, dc_name, timeout=180, wait_step=3):
+ return get_pod_names_from_dc(
+ hostname, dc_name, timeout=timeout, wait_step=wait_step)[0]
+
+
+def get_pvc_status(hostname, pvc_name):
+ '''
+    This function returns the status of the given PVC
+    Args:
+        hostname (str): hostname on which we want
+                        to check the pvc status
+        pvc_name (str): pvc_name for which we
+                        need the status
+ Returns:
+        (bool, str): (True, pvc status) if successful,
+        otherwise (False, error message).
+ '''
+ cmd = "oc get pvc | grep %s | awk '{print $2}'" % pvc_name
+ ret, out, err = g.run(hostname, cmd, "root")
+ if ret != 0:
+ g.log.error("failed to execute cmd %s" % cmd)
+ return False, err
+ output = out.strip().split("\n")[0].strip()
+ return True, output
+
+
+def verify_pvc_status_is_bound(hostname, pvc_name, timeout=120, wait_step=3):
+ """Verify that PVC gets 'Bound' status in required time.
+
+ Args:
+ hostname (str): hostname on which we will execute oc commands
+ pvc_name (str): name of PVC to check status of
+ timeout (int): total time in seconds we are ok to wait
+ for 'Bound' status of a PVC
+ wait_step (int): time in seconds we will sleep before checking a PVC
+ status again.
+    Returns: str: PVC name, once it reaches the 'Bound' state.
+ Raises: exceptions.ExecutionError in case of errors.
+ """
+ pvc_not_found_counter = 0
+ for w in waiter.Waiter(timeout, wait_step):
+ ret, output = get_pvc_status(hostname, pvc_name)
+ if ret is not True:
+ msg = ("Failed to execute 'get' command for '%s' PVC. "
+ "Got following responce: %s" % (pvc_name, output))
+ g.log.error(msg)
+ raise exceptions.ExecutionError(msg)
+ if output == "":
+ g.log.info("PVC '%s' not found, sleeping for %s "
+ "sec." % (pvc_name, wait_step))
+            if pvc_not_found_counter > 0:
+                msg = ("PVC '%s' has not been found 2 times already. "
+                       "Make sure you provided correct PVC name." % pvc_name)
+                g.log.error(msg)
+                raise exceptions.ExecutionError(msg)
+            pvc_not_found_counter += 1
+            continue
+ elif output == "Pending":
+ g.log.info("PVC '%s' is in Pending state, sleeping for %s "
+ "sec" % (pvc_name, wait_step))
+ continue
+ elif output == "Bound":
+ g.log.info("PVC '%s' is in Bound state." % pvc_name)
+ return pvc_name
+ elif output == "Error":
+ msg = "PVC '%s' is in 'Error' state." % pvc_name
+ g.log.error(msg)
+ else:
+ msg = "PVC %s has different status - %s" % (pvc_name, output)
+ g.log.error(msg)
+ if msg:
+ raise AssertionError(msg)
+ if w.expired:
+ msg = ("Exceeded timeout of '%s' seconds for verifying PVC '%s' "
+ "to reach the 'Bound' state." % (timeout, pvc_name))
+
+ # Gather more info for ease of debugging
+ try:
+ pvc_events = get_events(hostname, obj_name=pvc_name)
+ except Exception:
+ pvc_events = '?'
+ msg += "\nPVC events: %s" % pvc_events
+
+ g.log.error(msg)
+ raise AssertionError(msg)
+
+
+def resize_pvc(hostname, pvc_name, size):
+ '''
+ Resize PVC
+ Args:
+        hostname (str): hostname on which we want
+                        to edit the pvc size
+        pvc_name (str): pvc_name for which we
+                        edit the storage capacity
+        size (int): new size of the pvc, in Gi
+ Returns:
+ bool: True, if successful
+ otherwise raise Exception
+ '''
+ cmd = ("oc patch pvc %s "
+ "-p='{\"spec\": {\"resources\": {\"requests\": "
+ "{\"storage\": \"%dGi\"}}}}'" % (pvc_name, size))
+ ret, out, err = g.run(hostname, cmd, "root")
+ if ret != 0:
+ error_msg = ("failed to execute cmd %s "
+ "out- %s err %s" % (cmd, out, err))
+ g.log.error(error_msg)
+ raise exceptions.ExecutionError(error_msg)
+
+ g.log.info("successfully edited storage capacity"
+ "of pvc %s . out- %s" % (pvc_name, out))
+ return True
+
+
+def verify_pvc_size(hostname, pvc_name, size,
+ timeout=120, wait_step=5):
+ '''
+ Verify size of PVC
+ Args:
+ hostname (str): hostname on which we want
+ to verify the size of pvc
+        pvc_name (str): name of the PVC whose
+                        size we verify
+        size (int): expected size of the pvc, in Gi
+        timeout (int): timeout value; the size is re-checked
+                       every wait_step seconds until timeout,
+                       default value is 120 sec
+        wait_step (int): wait step,
+                         default value is 5 sec
+ Returns:
+ bool: True, if successful
+ otherwise raise Exception
+ '''
+ cmd = ("oc get pvc %s -o=custom-columns="
+ ":.spec.resources.requests.storage,"
+ ":.status.capacity.storage" % pvc_name)
+ for w in waiter.Waiter(timeout, wait_step):
+ sizes = command.cmd_run(cmd, hostname=hostname).split()
+ spec_size = int(sizes[0].replace("Gi", ""))
+ actual_size = int(sizes[1].replace("Gi", ""))
+ if spec_size == actual_size == size:
+ g.log.info("verification of pvc %s of size %d "
+ "successful" % (pvc_name, size))
+ return True
+ else:
+ g.log.info("sleeping for %s sec" % wait_step)
+ continue
+
+ err_msg = ("verification of pvc %s size of %d failed -"
+ "spec_size- %d actual_size %d" % (
+ pvc_name, size, spec_size, actual_size))
+ g.log.error(err_msg)
+ raise AssertionError(err_msg)
+
+
+def verify_pv_size(hostname, pv_name, size,
+ timeout=120, wait_step=5):
+ '''
+ Verify size of PV
+ Args:
+ hostname (str): hostname on which we want
+ to verify the size of pv
+        pv_name (str): name of the PV whose
+                       size we verify
+        size (int): expected size of the pv, in Gi
+        timeout (int): timeout value; the size is re-checked
+                       every wait_step seconds until timeout,
+                       default value is 120 sec
+        wait_step (int): wait step,
+                         default value is 5 sec
+ Returns:
+ bool: True, if successful
+ otherwise raise Exception
+ '''
+ cmd = ("oc get pv %s -o=custom-columns=:."
+ "spec.capacity.storage" % pv_name)
+ for w in waiter.Waiter(timeout, wait_step):
+ pv_size = command.cmd_run(cmd, hostname=hostname).split()[0]
+ pv_size = int(pv_size.replace("Gi", ""))
+ if pv_size == size:
+ g.log.info("verification of pv %s of size %d "
+ "successful" % (pv_name, size))
+ return True
+ else:
+ g.log.info("sleeping for %s sec" % wait_step)
+ continue
+
+ err_msg = ("verification of pv %s size of %d failed -"
+ "pv_size- %d" % (pv_name, size, pv_size))
+ g.log.error(err_msg)
+ raise AssertionError(err_msg)
+
+
+def get_pv_name_from_pvc(hostname, pvc_name):
+ '''
+    Returns the PV name corresponding to the given PVC
+ Args:
+ hostname (str): hostname on which we want
+ to find pv name
+ pvc_name (str): pvc_name for which we
+ want to find corresponding
+ pv name
+ Returns:
+ pv_name (str): pv name if successful,
+ otherwise raise Exception
+ '''
+ cmd = ("oc get pvc %s -o=custom-columns=:."
+ "spec.volumeName" % pvc_name)
+ pv_name = command.cmd_run(cmd, hostname=hostname)
+ g.log.info("pv name is %s for pvc %s" % (
+ pv_name, pvc_name))
+
+ return pv_name
+
+
+def get_vol_names_from_pv(hostname, pv_name):
+ '''
+ Returns the heketi and gluster
+ vol names of the corresponding PV
+ Args:
+ hostname (str): hostname on which we want
+ to find vol names
+ pv_name (str): pv_name for which we
+ want to find corresponding
+ vol names
+ Returns:
+        volname (dict): dict if successful,
+            {"heketi_vol": heketi_vol_name,
+             "gluster_vol": gluster_vol_name}
+            e.g. {"heketi_vol": "xxxx",
+                  "gluster_vol": "vol_xxxx"}
+            otherwise raise Exception
+ '''
+ vol_dict = {}
+ cmd = (r"oc get pv %s -o=custom-columns="
+ r":.metadata.annotations."
+ r"'gluster\.kubernetes\.io\/heketi\-volume\-id',"
+ r":.spec.glusterfs.path" % pv_name)
+ vol_list = command.cmd_run(cmd, hostname=hostname).split()
+ vol_dict = {"heketi_vol": vol_list[0],
+ "gluster_vol": vol_list[1]}
+ g.log.info("gluster vol name is %s and heketi vol name"
+ " is %s for pv %s"
+ % (vol_list[1], vol_list[0], pv_name))
+ return vol_dict
+
+
+def get_events(hostname,
+ obj_name=None, obj_namespace=None, obj_type=None,
+ event_reason=None, event_type=None):
+ """Return filtered list of events.
+
+ Args:
+ hostname (str): hostname of oc client
+ obj_name (str): name of an object
+ obj_namespace (str): namespace where object is located
+ obj_type (str): type of an object, i.e. PersistentVolumeClaim or Pod
+ event_reason (str): reason why event was created,
+ i.e. Created, Started, Unhealthy, SuccessfulCreate, Scheduled ...
+ event_type (str): type of an event, i.e. Normal or Warning
+ Returns:
+ List of dictionaries, where the latter are of following structure:
+ {
+ "involvedObject": {
+ "kind": "ReplicationController",
+ "name": "foo-object-name",
+ "namespace": "foo-object-namespace",
+ },
+ "message": "Created pod: foo-object-name",
+ "metadata": {
+ "creationTimestamp": "2018-10-19T18:27:09Z",
+ "name": "foo-object-name.155f15db4e72cc2e",
+ "namespace": "foo-object-namespace",
+ },
+ "reason": "SuccessfulCreate",
+ "reportingComponent": "",
+ "reportingInstance": "",
+ "source": {"component": "replication-controller"},
+ "type": "Normal"
+ }
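+
+    Example (a hedged sketch; the object name is a placeholder):
+        get_events('ocp-master.example.com', obj_name='claim1',
+                   obj_type='PersistentVolumeClaim', event_type='Warning')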
+ """
+ field_selector = []
+ if obj_name:
+ field_selector.append('involvedObject.name=%s' % obj_name)
+ if obj_namespace:
+ field_selector.append('involvedObject.namespace=%s' % obj_namespace)
+ if obj_type:
+ field_selector.append('involvedObject.kind=%s' % obj_type)
+ if event_reason:
+ field_selector.append('reason=%s' % event_reason)
+ if event_type:
+ field_selector.append('type=%s' % event_type)
+ cmd = "oc get events -o yaml --field-selector %s" % ",".join(
+ field_selector or "''")
+ return yaml.load(command.cmd_run(cmd, hostname=hostname))['items']
+
+
+def wait_for_events(hostname,
+ obj_name=None, obj_namespace=None, obj_type=None,
+ event_reason=None, event_type=None,
+ timeout=120, wait_step=3):
+ """Wait for appearence of specific set of events."""
+ for w in waiter.Waiter(timeout, wait_step):
+ events = get_events(
+ hostname=hostname, obj_name=obj_name, obj_namespace=obj_namespace,
+ obj_type=obj_type, event_reason=event_reason,
+ event_type=event_type)
+ if events:
+ return events
+ if w.expired:
+ err_msg = ("Exceeded %ssec timeout waiting for events." % timeout)
+ g.log.error(err_msg)
+ raise exceptions.ExecutionError(err_msg)
+
+
+def match_pvc_and_pv(hostname, prefix):
+ """Match OCP PVCs and PVs generated
+
+ Args:
+ hostname (str): hostname of oc client
+ prefix (str): pv prefix used by user at time
+ of pvc creation
+ """
+ pvc_list = sorted([
+ pvc[0]
+ for pvc in oc_get_custom_resource(hostname, "pvc", ":.metadata.name")
+ if pvc[0].startswith(prefix)
+ ])
+
+ pv_list = sorted([
+ pv[0]
+ for pv in oc_get_custom_resource(
+ hostname, "pv", ":.spec.claimRef.name"
+ )
+ if pv[0].startswith(prefix)
+ ])
+
+    if pvc_list != pv_list:
+ err_msg = "PVC and PV list match failed"
+ err_msg += "\nPVC list: %s, " % pvc_list
+ err_msg += "\nPV list %s" % pv_list
+ err_msg += "\nDifference: %s" % (set(pvc_list) ^ set(pv_list))
+ raise AssertionError(err_msg)
+
+
+def match_pv_and_heketi_block_volumes(
+ hostname, heketi_block_volumes, pvc_prefix):
+ """Match heketi block volumes and OC PVCs
+
+ Args:
+ hostname (str): hostname on which we want to check heketi
+ block volumes and OCP PVCs
+ heketi_block_volumes (list): list of heketi block volume names
+ pvc_prefix (str): pv prefix given by user at the time of pvc creation
+ """
+ custom_columns = [
+ r':.spec.claimRef.name',
+ r':.metadata.annotations."pv\.kubernetes\.io\/provisioned\-by"',
+ r':.metadata.annotations."gluster\.org\/volume\-id"'
+ ]
+ pv_block_volumes = sorted([
+ pv[2]
+ for pv in oc_get_custom_resource(hostname, "pv", custom_columns)
+ if pv[0].startswith(pvc_prefix) and pv[1] == "gluster.org/glusterblock"
+ ])
+
+    if pv_block_volumes != heketi_block_volumes:
+ err_msg = "PV block volumes and Heketi Block volume list match failed"
+ err_msg += "\nPV Block Volumes: %s, " % pv_block_volumes
+ err_msg += "\nHeketi Block volumes %s" % heketi_block_volumes
+ err_msg += "\nDifference: %s" % (set(pv_block_volumes) ^
+ set(heketi_block_volumes))
+ raise AssertionError(err_msg)
+
+
+def check_service_status_on_pod(
+ ocp_client, podname, service, status, timeout=180, wait_step=3):
+ """Check a service state on a pod.
+
+ Args:
+ ocp_client (str): node with 'oc' client
+ podname (str): pod name on which service needs to be checked
+ service (str): service which needs to be checked
+ status (str): status to be checked
+ timeout (int): seconds to wait before service starts having
+ specified 'status'
+ wait_step (int): interval in seconds to wait before checking
+ service again.
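+
+    Example (a sketch; the pod name and expected status are
+    placeholders):
+        check_service_status_on_pod(
+            'ocp-client.example.com', 'glusterfs-s6b8p',
+            'glusterd', 'active', timeout=60)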
+ """
+ err_msg = ("Exceeded timeout of %s sec for verifying %s service to start "
+ "having '%s' status" % (timeout, service, status))
+
+ for w in waiter.Waiter(timeout, wait_step):
+ ret, out, err = oc_rsh(ocp_client, podname, SERVICE_STATUS % service)
+ if ret != 0:
+ err_msg = ("failed to get service %s's status on pod %s" %
+ (service, podname))
+ g.log.error(err_msg)
+ raise AssertionError(err_msg)
+
+ for line in out.splitlines():
+ status_match = re.search(SERVICE_STATUS_REGEX, line)
+ if status_match and status_match.group(1) == status:
+ return True
+
+ if w.expired:
+ g.log.error(err_msg)
+ raise exceptions.ExecutionError(err_msg)
+
+
+def wait_for_service_status_on_gluster_pod_or_node(
+ ocp_client, service, status, gluster_node, timeout=180, wait_step=3):
+ """Wait for a service specific status on a Gluster POD or node.
+
+ Args:
+ ocp_client (str): hostname on which we want to check service
+ service (str): target service to be checked
+ status (str): service status which we wait for
+ gluster_node (str): Gluster node IPv4 which stores either Gluster POD
+ or Gluster services directly.
+ timeout (int): seconds to wait before service starts having
+ specified 'status'
+ wait_step (int): interval in seconds to wait before checking
+ service again.
+ """
+ err_msg = ("Exceeded timeout of %s sec for verifying %s service to start "
+ "having '%s' status" % (timeout, service, status))
+
+ for w in waiter.Waiter(timeout, wait_step):
+ out = cmd_run_on_gluster_pod_or_node(
+ ocp_client, SERVICE_STATUS % service, gluster_node)
+ for line in out.splitlines():
+ status_match = re.search(SERVICE_STATUS_REGEX, line)
+ if status_match and status_match.group(1) == status:
+ return True
+ if w.expired:
+ g.log.error(err_msg)
+ raise exceptions.ExecutionError(err_msg)
+
+
+def restart_service_on_gluster_pod_or_node(ocp_client, service, gluster_node):
+ """Restart service on Gluster either POD or node.
+
+ Args:
+ ocp_client (str): host on which we want to run 'oc' commands.
+ service (str): service which needs to be restarted
+ gluster_node (str): Gluster node IPv4 which stores either Gluster POD
+ or Gluster services directly.
+ Raises:
+ AssertionError in case restart of a service fails.
+ """
+ cmd_run_on_gluster_pod_or_node(
+ ocp_client, SERVICE_RESTART % service, gluster_node)
+
+
+def oc_adm_manage_node(
+ ocp_client, operation, nodes=None, node_selector=None):
+ """Manage common operations on nodes for administrators.
+
+ Args:
+ ocp_client (str): host on which we want to run 'oc' commands.
+        operation (str): operation to perform,
+            e.g. --schedulable=true.
+        nodes (list): list of nodes to manage.
+        node_selector (str): selector to select the nodes.
+        Note: 'nodes' and 'node_selector' are mutually exclusive.
+              Only one of them should be passed as parameter, not both.
+ Returns:
+ str: In case of success.
+ Raises:
+ AssertionError: In case of any failures.
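+
+    Example (hypothetical node names):
+        oc_adm_manage_node(
+            'ocp-client.example.com', '--schedulable=false',
+            nodes=['node1.example.com', 'node2.example.com'])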
+ """
+
+ if (not nodes) == (not node_selector):
+ raise AssertionError(
+ "'nodes' and 'node_selector' are mutually exclusive. "
+ "Only either of them should be passed as parameter not both.")
+
+ cmd = "oc adm manage-node %s" % operation
+ if node_selector:
+ cmd += " --selector %s" % node_selector
+ else:
+ node = ' '.join(nodes)
+ cmd += " " + node
+
+ return command.cmd_run(cmd, ocp_client)
+
+
+def oc_get_schedulable_nodes(ocp_client):
+ """Get the list of schedulable nodes.
+
+ Args:
+ ocp_client (str): host on which we want to run 'oc' commands.
+
+ Returns:
+ list: list of nodes if present.
+ Raises:
+ AssertionError: In case of any failures.
+ """
+ cmd = ("oc get nodes --field-selector=spec.unschedulable!=true "
+ "-o=custom-columns=:.metadata.name,:.spec.taints[*].effect "
+ "--no-headers | awk '!/NoSchedule/{print $1}'")
+
+ out = command.cmd_run(cmd, ocp_client)
+
+ return out.split('\n') if out else out
diff --git a/openshift-storage-libs/openshiftstoragelibs/openshift_storage_libs.py b/openshift-storage-libs/openshiftstoragelibs/openshift_storage_libs.py
new file mode 100644
index 00000000..4d2b4f61
--- /dev/null
+++ b/openshift-storage-libs/openshiftstoragelibs/openshift_storage_libs.py
@@ -0,0 +1,228 @@
+from glusto.core import Glusto as g
+import yaml
+
+from openshiftstoragelibs.command import cmd_run
+from openshiftstoragelibs.exceptions import (
+ ExecutionError,
+ NotSupportedException,
+)
+from openshiftstoragelibs.openshift_version import get_openshift_version
+
+
+MASTER_CONFIG_FILEPATH = "/etc/origin/master/master-config.yaml"
+
+
+def validate_multipath_pod(hostname, podname, hacount, mpath=""):
+ '''
+ This function validates multipath for given app-pod
+ Args:
+ hostname (str): ocp master node name
+ podname (str): app-pod name for which we need to validate
+ multipath. ex : nginx1
+        hacount (int): multipath count or HA count. ex: 3
+        mpath (str): name of the mpath device to inspect. ex: mpatha
+ Returns:
+ bool: True if successful,
+ otherwise False
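+
+    Example (illustrative values):
+        validate_multipath_pod(
+            'ocp-master.example.com', 'nginx1', 3, mpath='mpatha')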
+ '''
+ cmd = "oc get pods -o wide | grep %s | awk '{print $7}'" % podname
+ ret, out, err = g.run(hostname, cmd, "root")
+ if ret != 0 or out == "":
+ g.log.error("failed to exectute cmd %s on %s, err %s"
+ % (cmd, hostname, out))
+ return False
+ pod_nodename = out.strip()
+ active_node_count = 1
+ enable_node_count = hacount - 1
+ cmd = "multipath -ll %s | grep 'status=active' | wc -l" % mpath
+ ret, out, err = g.run(pod_nodename, cmd, "root")
+ if ret != 0 or out == "":
+ g.log.error("failed to exectute cmd %s on %s, err %s"
+ % (cmd, pod_nodename, out))
+ return False
+ active_count = int(out.strip())
+ if active_node_count != active_count:
+ g.log.error("active node count on %s for %s is %s and not 1"
+ % (pod_nodename, podname, active_count))
+ return False
+ cmd = "multipath -ll %s | grep 'status=enabled' | wc -l" % mpath
+ ret, out, err = g.run(pod_nodename, cmd, "root")
+ if ret != 0 or out == "":
+ g.log.error("failed to exectute cmd %s on %s, err %s"
+ % (cmd, pod_nodename, out))
+ return False
+ enable_count = int(out.strip())
+ if enable_node_count != enable_count:
+ g.log.error("passive node count on %s for %s is %s "
+ "and not %s" % (
+ pod_nodename, podname, enable_count,
+ enable_node_count))
+ return False
+
+ g.log.info("validation of multipath for %s is successfull"
+ % podname)
+ return True
+
+
+def enable_pvc_resize(master_node):
+ '''
+    This function edits the /etc/origin/master/master-config.yaml
+    file to enable the pv_resize feature and restarts the
+    atomic-openshift services on the master node
+    Args:
+        master_node (str): hostname of the master node on which
+                           we want to edit the
+                           master-config.yaml file
+ Returns:
+ bool: True if successful,
+ otherwise raise Exception
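+
+    The resulting master-config.yaml fragment looks roughly like
+    the following (illustrative, not a verbatim dump):
+        admissionConfig:
+          pluginConfig:
+            PersistentVolumeClaimResize:
+              configuration:
+                apiVersion: v1
+                disable: 'false'
+                kind: DefaultAdmissionConfig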
+ '''
+ version = get_openshift_version()
+ if version < "3.9":
+ msg = ("pv resize is not available in openshift "
+ "version %s " % version)
+ g.log.error(msg)
+ raise NotSupportedException(msg)
+
+ try:
+ conn = g.rpyc_get_connection(master_node, user="root")
+ if conn is None:
+ err_msg = ("Failed to get rpyc connection of node %s"
+ % master_node)
+ g.log.error(err_msg)
+ raise ExecutionError(err_msg)
+
+ with conn.builtin.open(MASTER_CONFIG_FILEPATH, 'r') as f:
+ data = yaml.load(f)
+ dict_add = data['admissionConfig']['pluginConfig']
+ if "PersistentVolumeClaimResize" in dict_add:
+ g.log.info("master-config.yaml file is already edited")
+ return True
+ dict_add['PersistentVolumeClaimResize'] = {
+ 'configuration': {
+ 'apiVersion': 'v1',
+ 'disable': 'false',
+ 'kind': 'DefaultAdmissionConfig'}}
+ data['admissionConfig']['pluginConfig'] = dict_add
+ kube_config = data['kubernetesMasterConfig']
+ for key in ('apiServerArguments', 'controllerArguments'):
+ kube_config[key] = (
+ kube_config.get(key)
+ if isinstance(kube_config.get(key), dict) else {})
+ value = ['ExpandPersistentVolumes=true']
+ kube_config[key]['feature-gates'] = value
+ with conn.builtin.open(MASTER_CONFIG_FILEPATH, 'w+') as f:
+ yaml.dump(data, f, default_flow_style=False)
+ except Exception as err:
+ raise ExecutionError("failed to edit master-config.yaml file "
+ "%s on %s" % (err, master_node))
+ finally:
+ g.rpyc_close_connection(master_node, user="root")
+
+ g.log.info("successfully edited master-config.yaml file "
+ "%s" % master_node)
+ if version == "3.9":
+ cmd = ("systemctl restart atomic-openshift-master-api "
+ "atomic-openshift-master-controllers")
+ else:
+ cmd = ("/usr/local/bin/master-restart api && "
+ "/usr/local/bin/master-restart controllers")
+ ret, out, err = g.run(master_node, cmd, "root")
+ if ret != 0:
+ err_msg = "Failed to execute cmd %s on %s\nout: %s\nerr: %s" % (
+ cmd, master_node, out, err)
+ g.log.error(err_msg)
+ raise ExecutionError(err_msg)
+
+ return True
+
+
+def get_iscsi_session(node, iqn=None, raise_on_error=True):
+ """Get the list of ip's of iscsi sessions.
+
+ Args:
+ node (str): where we want to run the command.
+        iqn (str): name of iqn.
+        raise_on_error (bool): whether to raise an error on command failure.
+    Returns:
+        list: list of session IPs.
+    Raises:
+ ExecutionError: In case of any failure if raise_on_error=True.
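+
+    Example of a return value (illustrative):
+        ['10.70.46.239', '10.70.46.240', '10.70.46.241']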
+ """
+
+ cmd = "set -o pipefail && ((iscsiadm -m session"
+ if iqn:
+ cmd += " | grep %s" % iqn
+ cmd += ") | awk '{print $3}' | cut -d ':' -f 1)"
+
+ out = cmd_run(cmd, node, raise_on_error=raise_on_error)
+
+ return out.split("\n") if out else out
+
+
+def get_iscsi_block_devices_by_path(node, iqn=None, raise_on_error=True):
+ """Get list of iscsiadm block devices from path.
+
+ Args:
+ node (str): where we want to run the command.
+        iqn (str): name of iqn.
+        raise_on_error (bool): whether to raise an error on command failure.
+    Returns:
+        dictionary: block devices and their IPs.
+    Raises:
+ ExecutionError: In case of any failure if raise_on_error=True.
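+
+    Example of a return value (illustrative):
+        {'sdb': '10.70.46.239', 'sdc': '10.70.46.240'}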
+ """
+ cmd = "set -o pipefail && ((ls --format=context /dev/disk/by-path/ip*"
+ if iqn:
+ cmd += " | grep %s" % iqn
+ cmd += ") | awk -F '/|:|-' '{print $10,$25}')"
+
+ out = cmd_run(cmd, node, raise_on_error=raise_on_error)
+
+ if not out:
+ return out
+
+ out_dic = {}
+ for i in out.split("\n"):
+ ip, device = i.strip().split(" ")
+ out_dic[device] = ip
+
+ return out_dic
+
+
+def get_mpath_name_from_device_name(node, device, raise_on_error=True):
+ """Get name of mpath device form block device
+
+ Args:
+ node (str): where we want to run the command.
+        device (str): block device for which we have to find mpath.
+ Returns:
+ str: name of device
+ Raises:
+ ExecutionError: In case of any failure if raise_on_error=True.
+ """
+ cmd = ("set -o pipefail && ((lsblk -n --list --output=NAME /dev/%s)"
+ " | tail -1)" % device)
+
+ return cmd_run(cmd, node, raise_on_error=raise_on_error)
+
+
+def get_active_and_enabled_devices_from_mpath(node, mpath):
+ """Get active and enabled devices from mpath name.
+
+ Args:
+ node (str): where we want to run the command.
+ mpath (str): name of mpath for which we have to find devices.
+ Returns:
+        dictionary: devices info, e.g. {'active': [...], 'enabled': [...]}
+ Raises:
+ ExecutionError: In case of any failure
+ """
+
+ cmd = ("set -o pipefail && ((multipath -ll %s | grep -A 1 status=%s)"
+ " | cut -d ':' -f 4 | awk '{print $2}')")
+
+ active = cmd_run(cmd % (mpath, 'active'), node).split('\n')[1::2]
+ enabled = cmd_run(cmd % (mpath, 'enabled'), node).split('\n')[1::2]
+
+ out_dic = {
+ 'active': active,
+ 'enabled': enabled}
+ return out_dic
diff --git a/openshift-storage-libs/openshiftstoragelibs/openshift_version.py b/openshift-storage-libs/openshiftstoragelibs/openshift_version.py
new file mode 100644
index 00000000..bc0c9be0
--- /dev/null
+++ b/openshift-storage-libs/openshiftstoragelibs/openshift_version.py
@@ -0,0 +1,173 @@
+"""
+Use this module for any OpenShift version comparisons.
+
+Usage example:
+
+ # Assume OpenShift version is '3.10.45'. Then we have following:
+ from openshiftstoragelibs import openshift_version
+ version = openshift_version.get_openshift_version()
+ if version < '3.10':
+ # False
+ if version <= '3.10':
+ # True
+ if version < '3.10.46':
+ # True
+ if version < '3.10.13':
+ # False
+ if '3.9' < version <= '3.11':
+ # True
+
+Notes:
+- If either comparison operand has an empty/zero 'micro' version part,
+  then the 'micro' part is ignored and only the 'major' and 'minor' parts
+  of the OpenShift versions are compared.
+
+"""
+import re
+
+from glusto.core import Glusto as g
+import six
+
+from openshiftstoragelibs import exceptions
+
+
+OPENSHIFT_VERSION_RE = r"(?:v?)(\d+)(?:\.)(\d+)(?:\.(\d+))?$"
+OPENSHIFT_VERSION = None
+
+
+def _get_openshift_version_str(hostname=None):
+ """Gets OpenShift version from 'oc version' command.
+
+ Args:
+ hostname (str): Node on which the ocp command should run.
+ Returns:
+ str : oc version, i.e. 'v3.10.47'
+    Raises: AssertionError or 'exceptions.ExecutionError' on failure
+ """
+ if not hostname:
+ hostname = list(g.config['ocp_servers']['client'].keys())[0]
+ cmd = "oc version | grep openshift | cut -d ' ' -f 2"
+ ret, out, err = g.run(hostname, cmd, "root")
+ if ret != 0:
+ msg = "Failed to get oc version. \n'err': %s\n 'out': %s" % (err, out)
+ g.log.error(msg)
+ raise AssertionError(msg)
+ out = out.strip()
+ if not out:
+ error_msg = "Empty output from 'oc version' command: '%s'" % out
+ g.log.error(error_msg)
+ raise exceptions.ExecutionError(error_msg)
+
+ return out
+
+
+def _parse_openshift_version(openshift_version_str):
+ """Parses OpenShift version str into tuple of 3 values.
+
+ Args:
+ openshift_version_str (str): OpenShift version like '3.10' or '3.10.45'
+ Returns:
+ Tuple object of 3 values - major, minor and micro version parts.
+ """
+ groups = re.findall(OPENSHIFT_VERSION_RE, openshift_version_str)
+ err_msg = (
+ "Failed to parse '%s' str into 3 OpenShift version parts - "
+ "'major', 'minor' and 'micro'. "
+ "Expected value like '3.10' or '3.10.45'" % openshift_version_str)
+ assert groups, err_msg
+ assert len(groups) == 1, err_msg
+ assert len(groups[0]) == 3, err_msg
+ return (int(groups[0][0]), int(groups[0][1]), int(groups[0][2] or 0))
+
+
+class OpenshiftVersion(object):
+ """Eases OpenShift versions comparison.
+
+    Instances of this class can be used for comparison with other instances
+    of it or with string-like objects.
+
+ Input str version is required to have, at least, 2 version parts -
+ 'major' and 'minor'. Third part is optional - 'micro' version.
+ Examples: '3.10', 'v3.10', '3.10.45', 'v3.10.45'.
+
+    Before each comparison, both operands are checked for a zero 'micro'
+    part. If either is zero, the 'micro' part is not used for comparison.
+
+ Usage example (1) - compare to string object:
+ version_3_10 = OpenshiftVersion('3.10')
+ cmp_result = '3.9' < version_3_10 <= '3.11'
+
+ Usage example (2) - compare to the same type of an object:
+ version_3_10 = OpenshiftVersion('3.10')
+ version_3_11 = OpenshiftVersion('3.11')
+ cmp_result = version_3_10 < version_3_11
+ """
+ def __init__(self, openshift_version_str):
+ self.v = _parse_openshift_version(openshift_version_str)
+ self.major, self.minor, self.micro = self.v
+
+ def _adapt_other(self, other):
+ if isinstance(other, six.string_types):
+ return OpenshiftVersion(other)
+ elif isinstance(other, OpenshiftVersion):
+ return other
+ else:
+ raise NotImplementedError(
+ "'%s' type is not supported for OpenShift version "
+ "comparison." % type(other))
+
+ def __lt__(self, other):
+ adapted_other = self._adapt_other(other)
+ if not all((self.micro, adapted_other.micro)):
+ return self.v[0:2] < adapted_other.v[0:2]
+ return self.v < adapted_other.v
+
+ def __le__(self, other):
+ adapted_other = self._adapt_other(other)
+ if not all((self.micro, adapted_other.micro)):
+ return self.v[0:2] <= adapted_other.v[0:2]
+ return self.v <= adapted_other.v
+
+ def __eq__(self, other):
+ adapted_other = self._adapt_other(other)
+ if not all((self.micro, adapted_other.micro)):
+ return self.v[0:2] == adapted_other.v[0:2]
+ return self.v == adapted_other.v
+
+ def __ge__(self, other):
+ adapted_other = self._adapt_other(other)
+ if not all((self.micro, adapted_other.micro)):
+ return self.v[0:2] >= adapted_other.v[0:2]
+ return self.v >= adapted_other.v
+
+ def __gt__(self, other):
+ adapted_other = self._adapt_other(other)
+ if not all((self.micro, adapted_other.micro)):
+ return self.v[0:2] > adapted_other.v[0:2]
+ return self.v > adapted_other.v
+
+ def __ne__(self, other):
+ adapted_other = self._adapt_other(other)
+ if not all((self.micro, adapted_other.micro)):
+ return self.v[0:2] != adapted_other.v[0:2]
+ return self.v != adapted_other.v
+
+
+def get_openshift_version(hostname=None):
+ """Cacher of an OpenShift version.
+
+ Version of an OpenShift cluster is constant value. So, we call API just
+ once and then reuse it's output.
+
+ Args:
+ hostname (str): a node with 'oc' client where command should run on.
+ If not specified, then first key
+ from 'ocp_servers.client' config option will be picked up.
+ Returns:
+ OpenshiftVersion object instance.
+ """
+ global OPENSHIFT_VERSION
+ if not OPENSHIFT_VERSION:
+ version_str = _get_openshift_version_str(hostname=hostname)
+ OPENSHIFT_VERSION = OpenshiftVersion(version_str)
+ return OPENSHIFT_VERSION
diff --git a/openshift-storage-libs/openshiftstoragelibs/podcmd.py b/openshift-storage-libs/openshiftstoragelibs/podcmd.py
new file mode 100644
index 00000000..bf84a8b9
--- /dev/null
+++ b/openshift-storage-libs/openshiftstoragelibs/podcmd.py
@@ -0,0 +1,141 @@
+"""Convenience wrappers for running commands within a pod
+
+The goal of this module is to support running glusto commands in pods
+without a lot of boilerplate and hassle. The basic idea is that we
+have our own run() function that can be addressed to a pod using the
+Pod object (namedtuple). This run function will work like a normal
+g.run() when not using the Pod object.
+
+Example:
+ >>> run("my-host.local", ["parted", "/dev/sda", "p"])
+ 0, "<...stdout...>", "<...stderr...>"
+
+ >>> run(Pod("my-host.local", "my-pod-426ln"),
+ ... ["pvs"])
+ 0, "<...stdout...>", "<...stderr...>"
+
+In addition, if there's a need to use some higher level functions
+that directly call into glusto run we can monkey-patch the glusto object
+using the GlustoPod context manager. GlustoPod can also be used as a
+decorator.
+
+Imagine a function that directly calls g.run:
+ >>> def get_magic_number(host, ticket):
+ ... s, out, _ = g.run(host, ['magicall', '--ticket', ticket])
+ ... if s != 0:
+ ... return None
+ ... return out.strip()
+
+If you want to have this operate within a pod you can use the GlustoPod
+manager to enable the pod-aware run method and pass it a Pod object
+as the first argument. Example:
+ >>> def check_magic_number(ticket):
+ ... with GlustoPod():
+ ... m = get_magic_number(Pod('myhost', 'mypod'), ticket)
+ ... return m > 42
+
+Similarly it can be used as a decorator:
+ >>> @GlustoPod()
+ ... def funky(x):
+ ... m = get_magic_number(Pod('myhost', 'mypod'), ticket)
+ ... return m > 42
+
+Because the custom run function only runs commands in pods when passed
+a Pod object it is fairly safe to enable the monkey-patch over the
+lifetime of a function that addresses both hosts and pods.
+"""
+
+from collections import namedtuple
+from functools import partial, wraps
+import types
+
+from glusto.core import Glusto as g
+
+from openshiftstoragelibs import openshift_ops
+
+# Define a namedtuple that allows us to address pods instead of just
+# hosts.
+Pod = namedtuple('Pod', 'node podname')
+
+
+def run(target, command, log_level=None, orig_run=g.run):
+ """Function that runs a command on a host or in a pod via a host.
+ Wraps glusto's run function.
+
+ Args:
+ target (str|Pod): If target is str object and
+ it equals to 'auto_get_gluster_endpoint', then
+ Gluster endpoint gets autocalculated to be any of
+ Gluster PODs or nodes depending on the deployment type of
+ a Gluster cluster.
+ If it is str object with other value, then it is considered to be
+ an endpoint for command.
+ If 'target' is of the 'Pod' type,
+ then command will run on the specified POD.
+ command (str|list): Command to run.
+ log_level (str|None): log level to be passed on to glusto's
+ run method
+ orig_run (function): The default implementation of the
+ run method. Will be used when target is not a pod.
+
+ Returns:
+ A tuple of the command's return code, stdout, and stderr.
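+
+    Example (a sketch; host and pod names are placeholders):
+        >>> run(Pod('ocp-client.example.com', 'glusterfs-s6b8p'),
+        ...     ['gluster', 'volume', 'list'])
+        (0, '<...stdout...>', '<...stderr...>')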
+ """
+ # NOTE: orig_run captures the glusto run method at function
+ # definition time in order to capture the method before
+ # any additional monkeypatching by other code
+
+ if target == 'auto_get_gluster_endpoint':
+ ocp_client_node = list(g.config['ocp_servers']['client'].keys())[0]
+ gluster_pods = openshift_ops.get_ocp_gluster_pod_names(ocp_client_node)
+ if gluster_pods:
+ target = Pod(ocp_client_node, gluster_pods[0])
+ else:
+ target = list(g.config.get("gluster_servers", {}).keys())[0]
+
+ if isinstance(target, Pod):
+ prefix = ['oc', 'rsh', target.podname]
+ if isinstance(command, types.StringTypes):
+ cmd = ' '.join(prefix + [command])
+ else:
+ cmd = prefix + command
+
+ # unpack the tuple to make sure our return value exactly matches
+ # our docstring
+ return g.run(target.node, cmd, log_level=log_level)
+ else:
+ return orig_run(target, command, log_level=log_level)
+
+
+class GlustoPod(object):
+ """A context manager / decorator that monkeypatches the
+ glusto object to support running commands in pods.
+ """
+
+ def __init__(self, glusto_obj=None):
+ self.runfunc = None
+ self._g = glusto_obj or g
+
+ def __enter__(self):
+ """Patch glusto to use the wrapped run method.
+ """
+ self.runfunc = self._g.run
+ # we "capture" the prior glusto run method here in order to
+ # stack on top of any previous monkeypatches if they exist
+ self._g.run = partial(run, orig_run=self.runfunc)
+
+ def __exit__(self, etype, value, tb):
+ """Restore the orginal run method.
+ """
+ self._g.run = self.runfunc
+ self.runfunc = None
+
+ def __call__(self, func):
+ """Allow GlustoPod to be used as a decorator.
+ """
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ with self:
+ result = func(*args, **kwargs)
+ return result
+ return wrapper
diff --git a/openshift-storage-libs/openshiftstoragelibs/utils.py b/openshift-storage-libs/openshiftstoragelibs/utils.py
new file mode 100644
index 00000000..2d16c497
--- /dev/null
+++ b/openshift-storage-libs/openshiftstoragelibs/utils.py
@@ -0,0 +1,40 @@
+"""Generic host utility functions.
+
+Generic utility functions not specific to a larger suite of tools.
+For example, not specific to OCP, Gluster, Heketi, etc.
+"""
+
+import random
+import string
+
+from prometheus_client.parser import text_string_to_metric_families
+
+
+def get_random_str(size=14):
+ chars = string.ascii_lowercase + string.digits
+ return ''.join(random.choice(chars) for _ in range(size))
+
+
+def parse_prometheus_data(text):
+ """Parse prometheus-formatted text to the python objects
+
+ Args:
+ text (str): prometheus-formatted data
+
+ Returns:
+ dict: parsed data as python dictionary
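+
+    Example (illustrative input and output):
+        >>> parse_prometheus_data('up{job="heketi"} 1')
+        {'up': [{'job': 'heketi', 'value': 1.0}]}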
+ """
+ metrics = {}
+ for family in text_string_to_metric_families(text):
+ for sample in family.samples:
+ key, data, val = (sample.name, sample.labels, sample.value)
+ if data.keys():
+ data['value'] = val
+ if key in metrics.keys():
+ metrics[key].append(data)
+ else:
+ metrics[key] = [data]
+ else:
+ metrics[key] = val
+
+ return metrics
diff --git a/openshift-storage-libs/openshiftstoragelibs/waiter.py b/openshift-storage-libs/openshiftstoragelibs/waiter.py
new file mode 100644
index 00000000..0d0c8c3a
--- /dev/null
+++ b/openshift-storage-libs/openshiftstoragelibs/waiter.py
@@ -0,0 +1,38 @@
+"""Helper object to encapsulate waiting for timeouts.
+
+Provide a Waiter class which encapsulates the operation
+of doing an action in a loop until a timeout value elapses.
+It aims to avoid having to write boilerplate code comparing times.
+"""
+
+import time
+
+
+class Waiter(object):
+ """A wait-retry loop as iterable.
+ This object abstracts away the wait logic allowing functions
+ to write the retry logic in a for-loop.
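+
+    Usage example (a minimal sketch; 'is_ready' is a hypothetical
+    check standing in for any condition):
+
+        for w in Waiter(timeout=120, interval=5):
+            if is_ready():
+                break
+        if w.expired:
+            raise Exception("Timed out waiting for readiness.")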
+ """
+ def __init__(self, timeout=60, interval=1):
+ self.timeout = timeout
+ self.interval = interval
+ self.expired = False
+ self._attempt = 0
+ self._start = None
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ if self._start is None:
+ self._start = time.time()
+ if time.time() - self._start > self.timeout:
+ self.expired = True
+ raise StopIteration()
+ if self._attempt != 0:
+ time.sleep(self.interval)
+ self._attempt += 1
+ return self
+
+ # NOTE(vponomar): py3 uses "__next__" method instead of "next" one.
+ __next__ = next
diff --git a/openshift-storage-libs/setup.py b/openshift-storage-libs/setup.py
new file mode 100644
index 00000000..3e528cbf
--- /dev/null
+++ b/openshift-storage-libs/setup.py
@@ -0,0 +1,30 @@
+#!/usr/bin/python
+from setuptools import setup, find_packages
+
+version = '0.2'
+name = 'openshift-storage-libs'
+
+setup(
+ name=name,
+ version=version,
+ description='Red Hat Container-Native Storage Libraries',
+ author='Red Hat, Inc.',
+ author_email='cns-qe@redhat.com',
+ packages=find_packages(),
+ include_package_data=True,
+ classifiers=[
+        'Development Status :: 3 - Alpha',
+        'Intended Audience :: QE, Developers',
+        'Operating System :: POSIX :: Linux',
+        'Programming Language :: Python',
+        'Programming Language :: Python :: 2',
+        'Programming Language :: Python :: 2.6',
+        'Programming Language :: Python :: 2.7',
+        'Topic :: Software Development :: Testing'
+ ],
+ install_requires=['glusto', 'ddt', 'mock', 'rtyaml', 'jsondiff', 'six',
+ 'prometheus_client>=0.4.2'],
+ dependency_links=[
+ 'http://github.com/loadtheaccumulator/glusto/tarball/master#egg=glusto'
+ ],
+)