summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorvamahaja <vamahaja@redhat.com>2019-02-08 14:01:53 +0530
committervponomar <vponomar@redhat.com>2019-02-22 11:22:41 +0000
commiteb396cc001519bfc3d27061a81a7cbf0692e4853 (patch)
tree1606ea5579a8c1c8da34a980080782bf0f7fd8c5
parentd62b88c330a36142f8e0ac18c3713908e3ca26be (diff)
Merged HeketiBaseClass to BaseClass
Renamed cns_baseclass.py to baseclass.py with updated BaseClass. Merged HeketiBaseClass to BaseClass with appropriate changes. Updated delete_volumes and delete_block_volumes methods used in test case method with heketi_delete_volume and heketi_blockvolume_delete. Change-Id: I8f623f1692cd863dc8ff041f7e8525ee0dc84f7b Signed-off-by: vamahaja <vamahaja@redhat.com>
-rw-r--r--cns-libs/cnslibs/cns/__init__.py0
-rw-r--r--cns-libs/cnslibs/cns/cns_baseclass.py251
-rw-r--r--cns-libs/cnslibs/common/baseclass.py257
-rw-r--r--cns-libs/cnslibs/common/heketi_libs.py120
-rw-r--r--tests/functional/common/arbiter/test_arbiter.py4
-rw-r--r--tests/functional/common/gluster_block/test_restart_gluster_block.py14
-rw-r--r--tests/functional/common/gluster_stability/test_gluster_services_restart.py2
-rw-r--r--tests/functional/common/heketi/heketi_tests/test_disabling_device.py15
-rw-r--r--tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py23
-rw-r--r--tests/functional/common/heketi/heketi_tests/test_node_enable_disable.py24
-rw-r--r--tests/functional/common/heketi/heketi_tests/test_node_info.py4
-rw-r--r--tests/functional/common/heketi/test_block_volumes_heketi.py59
-rw-r--r--tests/functional/common/heketi/test_check_entries.py6
-rw-r--r--tests/functional/common/heketi/test_create_distributed_replica_heketi_volume.py4
-rw-r--r--tests/functional/common/heketi/test_device_info.py4
-rw-r--r--tests/functional/common/heketi/test_heketi_device_operations.py37
-rw-r--r--tests/functional/common/heketi/test_heketi_metrics.py14
-rw-r--r--tests/functional/common/heketi/test_heketi_volume_operations.py4
-rw-r--r--tests/functional/common/heketi/test_server_state_examine_gluster.py4
-rw-r--r--tests/functional/common/heketi/test_volume_creation.py15
-rw-r--r--tests/functional/common/heketi/test_volume_deletion.py22
-rw-r--r--tests/functional/common/heketi/test_volume_expansion_and_devices.py4
-rw-r--r--tests/functional/common/heketi/test_volume_multi_req.py4
-rw-r--r--tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py2
-rw-r--r--tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py2
-rw-r--r--tests/functional/common/provisioning/test_pv_resize.py2
-rw-r--r--tests/functional/common/provisioning/test_storage_class_cases.py4
-rw-r--r--tests/functional/common/test_heketi_restart.py25
-rw-r--r--tests/functional/common/test_node_restart.py2
29 files changed, 416 insertions, 512 deletions
diff --git a/cns-libs/cnslibs/cns/__init__.py b/cns-libs/cnslibs/cns/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/cns-libs/cnslibs/cns/__init__.py
+++ /dev/null
diff --git a/cns-libs/cnslibs/cns/cns_baseclass.py b/cns-libs/cnslibs/cns/cns_baseclass.py
deleted file mode 100644
index 9e7912b2..00000000
--- a/cns-libs/cnslibs/cns/cns_baseclass.py
+++ /dev/null
@@ -1,251 +0,0 @@
-import datetime
-
-from glusto.core import Glusto as g
-
-from cnslibs.common import baseclass
-from cnslibs.common import command
-from cnslibs.common.exceptions import ExecutionError
-from cnslibs.common.heketi_ops import (
- heketi_blockvolume_delete,
- heketi_volume_delete)
-from cnslibs.common.openshift_ops import (
- get_pod_name_from_dc,
- get_pv_name_from_pvc,
- oc_create_app_dc_with_io,
- oc_create_pvc,
- oc_create_sc,
- oc_create_secret,
- oc_delete,
- oc_get_custom_resource,
- scale_dc_pod_amount_and_wait,
- verify_pvc_status_is_bound,
- wait_for_pod_be_ready,
- wait_for_resource_absence,
-)
-
-
-class BaseClass(baseclass.BaseClass):
- """Base class for test classes."""
-
- @classmethod
- def setUpClass(cls):
- """Initialize all the variables necessary for test cases."""
- super(BaseClass, cls).setUpClass()
-
- # Initializes OCP config variables
- cls.ocp_servers_info = g.config['ocp_servers']
- cls.ocp_master_node = g.config['ocp_servers']['master'].keys()
- cls.ocp_master_node_info = g.config['ocp_servers']['master']
- cls.ocp_client = g.config['ocp_servers']['client'].keys()
- cls.ocp_client_info = g.config['ocp_servers']['client']
- cls.ocp_nodes = g.config['ocp_servers']['nodes'].keys()
- cls.ocp_nodes_info = g.config['ocp_servers']['nodes']
-
- # Initializes storage project config variables
- openshift_config = g.config.get("cns", g.config.get("openshift"))
- cls.storage_project_name = openshift_config.get(
- 'storage_project_name',
- openshift_config.get('setup', {}).get('cns_project_name'))
-
- # Initializes heketi config variables
- heketi_config = openshift_config['heketi_config']
- cls.heketi_dc_name = heketi_config['heketi_dc_name']
- cls.heketi_service_name = heketi_config['heketi_service_name']
- cls.heketi_client_node = heketi_config['heketi_client_node']
- cls.heketi_server_url = heketi_config['heketi_server_url']
- cls.heketi_cli_user = heketi_config['heketi_cli_user']
- cls.heketi_cli_key = heketi_config['heketi_cli_key']
-
- cls.gluster_servers = g.config['gluster_servers'].keys()
- cls.gluster_servers_info = g.config['gluster_servers']
-
- cls.storage_classes = openshift_config['dynamic_provisioning'][
- 'storage_classes']
- cls.sc = cls.storage_classes.get(
- 'storage_class1', cls.storage_classes.get('file_storage_class'))
- cmd = "echo -n %s | base64" % cls.heketi_cli_key
- ret, out, err = g.run(cls.ocp_master_node[0], cmd, "root")
- if ret != 0:
- raise ExecutionError("failed to execute cmd %s on %s out: %s "
- "err: %s" % (
- cmd, cls.ocp_master_node[0], out, err))
- cls.secret_data_key = out.strip()
-
- cmd = 'oc project %s' % cls.storage_project_name
- ret, out, err = g.run(cls.ocp_client[0], cmd, "root")
- if ret != 0:
- raise ExecutionError("failed to execute cmd %s on %s out: "
- "%s err: %s" % (
- cmd, cls.ocp_client[0], out, err))
-
- if 'glustotest_run_id' not in g.config:
- g.config['glustotest_run_id'] = (
- datetime.datetime.now().strftime('%H_%M_%d_%m_%Y'))
- cls.glustotest_run_id = g.config['glustotest_run_id']
- msg = "Setupclass: %s : %s" % (cls.__name__, cls.glustotest_run_id)
- g.log.info(msg)
-
- def setUp(self):
- super(BaseClass, self).setUp()
- msg = "Starting Test : %s : %s" % (self.id(), self.glustotest_run_id)
- g.log.info(msg)
-
- def tearDown(self):
- super(BaseClass, self).tearDown()
- msg = "Ending Test: %s : %s" % (self.id(), self.glustotest_run_id)
- g.log.info(msg)
-
- @classmethod
- def tearDownClass(cls):
- super(BaseClass, cls).tearDownClass()
- msg = "Teardownclass: %s : %s" % (cls.__name__, cls.glustotest_run_id)
- g.log.info(msg)
-
- def cmd_run(self, cmd, hostname=None, raise_on_error=True):
- if not hostname:
- hostname = self.ocp_master_node[0]
- return command.cmd_run(
- cmd=cmd, hostname=hostname, raise_on_error=raise_on_error)
-
- def create_secret(self, secret_name_prefix="autotests-secret-"):
- secret_name = oc_create_secret(
- self.ocp_client[0],
- secret_name_prefix=secret_name_prefix,
- namespace=(self.sc.get(
- 'secretnamespace',
- self.sc.get('restsecretnamespace', 'default'))),
- data_key=self.heketi_cli_key,
- secret_type=self.sc.get('provisioner', 'kubernetes.io/glusterfs'))
- self.addCleanup(
- oc_delete, self.ocp_client[0], 'secret', secret_name)
- return secret_name
-
- def create_storage_class(self, secret_name=None,
- sc_name_prefix="autotests-sc",
- create_vol_name_prefix=False,
- allow_volume_expansion=False,
- reclaim_policy="Delete",
- set_hacount=None,
- is_arbiter_vol=False, arbiter_avg_file_size=None):
-
- # Create secret if one is not specified
- if not secret_name:
- secret_name = self.create_secret()
-
- # Create storage class
- secret_name_option = "secretname"
- secret_namespace_option = "secretnamespace"
- provisioner = self.sc.get("provisioner", "kubernetes.io/glusterfs")
- if provisioner != "kubernetes.io/glusterfs":
- secret_name_option = "rest%s" % secret_name_option
- secret_namespace_option = "rest%s" % secret_namespace_option
- parameters = {
- "resturl": self.sc["resturl"],
- "restuser": self.sc["restuser"],
- secret_name_option: secret_name,
- secret_namespace_option: self.sc.get(
- "secretnamespace", self.sc.get("restsecretnamespace")),
- }
- if set_hacount:
- parameters["hacount"] = self.sc.get("hacount", "3")
- if is_arbiter_vol:
- parameters["volumeoptions"] = "user.heketi.arbiter true"
- if arbiter_avg_file_size:
- parameters["volumeoptions"] += (
- ",user.heketi.average-file-size %s" % (
- arbiter_avg_file_size))
- if create_vol_name_prefix:
- parameters["volumenameprefix"] = self.sc.get(
- "volumenameprefix", "autotest")
- self.sc_name = oc_create_sc(
- self.ocp_client[0],
- sc_name_prefix=sc_name_prefix,
- provisioner=provisioner,
- allow_volume_expansion=allow_volume_expansion,
- reclaim_policy=reclaim_policy,
- **parameters)
- self.addCleanup(oc_delete, self.ocp_client[0], "sc", self.sc_name)
- return self.sc_name
-
- def create_and_wait_for_pvcs(self, pvc_size=1,
- pvc_name_prefix="autotests-pvc",
- pvc_amount=1, sc_name=None,
- timeout=120, wait_step=3):
- node = self.ocp_client[0]
-
- # Create storage class if not specified
- if not sc_name:
- if getattr(self, "sc_name", ""):
- sc_name = self.sc_name
- else:
- sc_name = self.create_storage_class()
-
- # Create PVCs
- pvc_names = []
- for i in range(pvc_amount):
- pvc_name = oc_create_pvc(
- node, sc_name, pvc_name_prefix=pvc_name_prefix,
- pvc_size=pvc_size)
- pvc_names.append(pvc_name)
- self.addCleanup(
- wait_for_resource_absence, node, 'pvc', pvc_name)
-
- # Wait for PVCs to be in bound state
- try:
- for pvc_name in pvc_names:
- verify_pvc_status_is_bound(node, pvc_name, timeout, wait_step)
- finally:
- reclaim_policy = oc_get_custom_resource(
- node, 'sc', ':.reclaimPolicy', sc_name)[0]
-
- for pvc_name in pvc_names:
- if reclaim_policy == 'Retain':
- pv_name = get_pv_name_from_pvc(node, pvc_name)
- self.addCleanup(oc_delete, node, 'pv', pv_name,
- raise_on_absence=False)
- custom = (r':.metadata.annotations."gluster\.kubernetes'
- r'\.io\/heketi\-volume\-id"')
- vol_id = oc_get_custom_resource(
- node, 'pv', custom, pv_name)[0]
- if self.sc.get('provisioner') == "kubernetes.io/glusterfs":
- self.addCleanup(heketi_volume_delete,
- self.heketi_client_node,
- self.heketi_server_url, vol_id,
- raise_on_error=False)
- else:
- self.addCleanup(heketi_blockvolume_delete,
- self.heketi_client_node,
- self.heketi_server_url, vol_id,
- raise_on_error=False)
- self.addCleanup(oc_delete, node, 'pvc', pvc_name,
- raise_on_absence=False)
-
- return pvc_names
-
- def create_and_wait_for_pvc(self, pvc_size=1,
- pvc_name_prefix='autotests-pvc', sc_name=None):
- self.pvc_name = self.create_and_wait_for_pvcs(
- pvc_size=pvc_size, pvc_name_prefix=pvc_name_prefix, sc_name=sc_name
- )[0]
- return self.pvc_name
-
- def create_dc_with_pvc(self, pvc_name, timeout=300, wait_step=10):
- dc_name = oc_create_app_dc_with_io(self.ocp_client[0], pvc_name)
- self.addCleanup(oc_delete, self.ocp_client[0], 'dc', dc_name)
- self.addCleanup(
- scale_dc_pod_amount_and_wait, self.ocp_client[0], dc_name, 0)
- pod_name = get_pod_name_from_dc(self.ocp_client[0], dc_name)
- wait_for_pod_be_ready(self.ocp_client[0], pod_name,
- timeout=timeout, wait_step=wait_step)
- return dc_name, pod_name
-
-
-class GlusterBlockBaseClass(BaseClass):
- """Base class for gluster-block test cases."""
-
- @classmethod
- def setUpClass(cls):
- """Initialize all the variables necessary for test cases."""
- super(GlusterBlockBaseClass, cls).setUpClass()
- cls.sc = cls.storage_classes.get(
- 'storage_class2', cls.storage_classes.get('block_storage_class'))
diff --git a/cns-libs/cnslibs/common/baseclass.py b/cns-libs/cnslibs/common/baseclass.py
index 36f00ff6..df3392fc 100644
--- a/cns-libs/cnslibs/common/baseclass.py
+++ b/cns-libs/cnslibs/common/baseclass.py
@@ -1,20 +1,262 @@
+import datetime
import unittest
from glusto.core import Glusto as g
+from cnslibs.common import command
+from cnslibs.common.exceptions import (
+ ExecutionError,
+ ConfigError
+)
+from cnslibs.common.heketi_ops import (
+ hello_heketi,
+ heketi_blockvolume_delete,
+ heketi_volume_delete
+)
+from cnslibs.common.openshift_ops import (
+ get_pod_name_from_dc,
+ get_pv_name_from_pvc,
+ oc_create_app_dc_with_io,
+ oc_create_pvc,
+ oc_create_sc,
+ oc_create_secret,
+ oc_delete,
+ oc_get_custom_resource,
+ scale_dc_pod_amount_and_wait,
+ switch_oc_project,
+ verify_pvc_status_is_bound,
+ wait_for_pod_be_ready,
+ wait_for_resource_absence,
+)
-class BaseClass(unittest.TestCase):
+class BaseClass(unittest.TestCase):
+ """Base class for test classes."""
ERROR_OR_FAILURE_EXISTS = False
STOP_ON_FIRST_FAILURE = bool(g.config.get("common", {}).get(
"stop_on_first_failure", False))
+ @classmethod
+ def setUpClass(cls):
+ """Initialize all the variables necessary for test cases."""
+ super(BaseClass, cls).setUpClass()
+
+ # Initializes OCP config variables
+ cls.ocp_servers_info = g.config['ocp_servers']
+ cls.ocp_master_node = g.config['ocp_servers']['master'].keys()
+ cls.ocp_master_node_info = g.config['ocp_servers']['master']
+ cls.ocp_client = g.config['ocp_servers']['client'].keys()
+ cls.ocp_client_info = g.config['ocp_servers']['client']
+ cls.ocp_nodes = g.config['ocp_servers']['nodes'].keys()
+ cls.ocp_nodes_info = g.config['ocp_servers']['nodes']
+
+ # Initializes storage project config variables
+ openshift_config = g.config.get("cns", g.config.get("openshift"))
+ cls.storage_project_name = openshift_config.get(
+ 'storage_project_name',
+ openshift_config.get('setup', {}).get('cns_project_name'))
+
+ # Initializes heketi config variables
+ heketi_config = openshift_config['heketi_config']
+ cls.heketi_dc_name = heketi_config['heketi_dc_name']
+ cls.heketi_service_name = heketi_config['heketi_service_name']
+ cls.heketi_client_node = heketi_config['heketi_client_node']
+ cls.heketi_server_url = heketi_config['heketi_server_url']
+ cls.heketi_cli_user = heketi_config['heketi_cli_user']
+ cls.heketi_cli_key = heketi_config['heketi_cli_key']
+
+ cls.gluster_servers = g.config['gluster_servers'].keys()
+ cls.gluster_servers_info = g.config['gluster_servers']
+
+ cls.storage_classes = openshift_config['dynamic_provisioning'][
+ 'storage_classes']
+ cls.sc = cls.storage_classes.get(
+ 'storage_class1', cls.storage_classes.get('file_storage_class'))
+ cmd = "echo -n %s | base64" % cls.heketi_cli_key
+ ret, out, err = g.run(cls.ocp_master_node[0], cmd, "root")
+ if ret != 0:
+ raise ExecutionError("failed to execute cmd %s on %s out: %s "
+ "err: %s" % (
+ cmd, cls.ocp_master_node[0], out, err))
+ cls.secret_data_key = out.strip()
+
+ # Checks if heketi server is alive
+ if not hello_heketi(cls.heketi_client_node, cls.heketi_server_url):
+ raise ConfigError("Heketi server %s is not alive"
+ % cls.heketi_server_url)
+
+ # Switch to the storage project
+ if not switch_oc_project(
+ cls.ocp_master_node[0], cls.storage_project_name):
+ raise ExecutionError("Failed to switch oc project on node %s"
+ % cls.ocp_master_node[0])
+
+ if 'glustotest_run_id' not in g.config:
+ g.config['glustotest_run_id'] = (
+ datetime.datetime.now().strftime('%H_%M_%d_%m_%Y'))
+ cls.glustotest_run_id = g.config['glustotest_run_id']
+ msg = "Setupclass: %s : %s" % (cls.__name__, cls.glustotest_run_id)
+ g.log.info(msg)
+
def setUp(self):
if (BaseClass.STOP_ON_FIRST_FAILURE and
BaseClass.ERROR_OR_FAILURE_EXISTS):
self.skipTest("Test is skipped, because of the restriction "
"to one test case failure.")
- return super(BaseClass, self).setUp()
+
+ super(BaseClass, self).setUp()
+
+ msg = "Starting Test : %s : %s" % (self.id(), self.glustotest_run_id)
+ g.log.info(msg)
+
+ def tearDown(self):
+ super(BaseClass, self).tearDown()
+ msg = "Ending Test: %s : %s" % (self.id(), self.glustotest_run_id)
+ g.log.info(msg)
+
+ @classmethod
+ def tearDownClass(cls):
+ super(BaseClass, cls).tearDownClass()
+ msg = "Teardownclass: %s : %s" % (cls.__name__, cls.glustotest_run_id)
+ g.log.info(msg)
+
+ def cmd_run(self, cmd, hostname=None, raise_on_error=True):
+ if not hostname:
+ hostname = self.ocp_master_node[0]
+ return command.cmd_run(
+ cmd=cmd, hostname=hostname, raise_on_error=raise_on_error)
+
+ def create_secret(self, secret_name_prefix="autotests-secret-"):
+ secret_name = oc_create_secret(
+ self.ocp_client[0],
+ secret_name_prefix=secret_name_prefix,
+ namespace=(self.sc.get(
+ 'secretnamespace',
+ self.sc.get('restsecretnamespace', 'default'))),
+ data_key=self.heketi_cli_key,
+ secret_type=self.sc.get('provisioner', 'kubernetes.io/glusterfs'))
+ self.addCleanup(
+ oc_delete, self.ocp_client[0], 'secret', secret_name)
+ return secret_name
+
+ def create_storage_class(self, secret_name=None,
+ sc_name_prefix="autotests-sc",
+ create_vol_name_prefix=False,
+ allow_volume_expansion=False,
+ reclaim_policy="Delete",
+ set_hacount=None,
+ is_arbiter_vol=False, arbiter_avg_file_size=None):
+
+ # Create secret if one is not specified
+ if not secret_name:
+ secret_name = self.create_secret()
+
+ # Create storage class
+ secret_name_option = "secretname"
+ secret_namespace_option = "secretnamespace"
+ provisioner = self.sc.get("provisioner", "kubernetes.io/glusterfs")
+ if provisioner != "kubernetes.io/glusterfs":
+ secret_name_option = "rest%s" % secret_name_option
+ secret_namespace_option = "rest%s" % secret_namespace_option
+ parameters = {
+ "resturl": self.sc["resturl"],
+ "restuser": self.sc["restuser"],
+ secret_name_option: secret_name,
+ secret_namespace_option: self.sc.get(
+ "secretnamespace", self.sc.get("restsecretnamespace")),
+ }
+ if set_hacount:
+ parameters["hacount"] = self.sc.get("hacount", "3")
+ if is_arbiter_vol:
+ parameters["volumeoptions"] = "user.heketi.arbiter true"
+ if arbiter_avg_file_size:
+ parameters["volumeoptions"] += (
+ ",user.heketi.average-file-size %s" % (
+ arbiter_avg_file_size))
+ if create_vol_name_prefix:
+ parameters["volumenameprefix"] = self.sc.get(
+ "volumenameprefix", "autotest")
+ self.sc_name = oc_create_sc(
+ self.ocp_client[0],
+ sc_name_prefix=sc_name_prefix,
+ provisioner=provisioner,
+ allow_volume_expansion=allow_volume_expansion,
+ reclaim_policy=reclaim_policy,
+ **parameters)
+ self.addCleanup(oc_delete, self.ocp_client[0], "sc", self.sc_name)
+ return self.sc_name
+
+ def create_and_wait_for_pvcs(self, pvc_size=1,
+ pvc_name_prefix="autotests-pvc",
+ pvc_amount=1, sc_name=None,
+ timeout=120, wait_step=3):
+ node = self.ocp_client[0]
+
+ # Create storage class if not specified
+ if not sc_name:
+ if getattr(self, "sc_name", ""):
+ sc_name = self.sc_name
+ else:
+ sc_name = self.create_storage_class()
+
+ # Create PVCs
+ pvc_names = []
+ for i in range(pvc_amount):
+ pvc_name = oc_create_pvc(
+ node, sc_name, pvc_name_prefix=pvc_name_prefix,
+ pvc_size=pvc_size)
+ pvc_names.append(pvc_name)
+ self.addCleanup(
+ wait_for_resource_absence, node, 'pvc', pvc_name)
+
+ # Wait for PVCs to be in bound state
+ try:
+ for pvc_name in pvc_names:
+ verify_pvc_status_is_bound(node, pvc_name, timeout, wait_step)
+ finally:
+ reclaim_policy = oc_get_custom_resource(
+ node, 'sc', ':.reclaimPolicy', sc_name)[0]
+
+ for pvc_name in pvc_names:
+ if reclaim_policy == 'Retain':
+ pv_name = get_pv_name_from_pvc(node, pvc_name)
+ self.addCleanup(oc_delete, node, 'pv', pv_name,
+ raise_on_absence=False)
+ custom = (r':.metadata.annotations."gluster\.kubernetes'
+ r'\.io\/heketi\-volume\-id"')
+ vol_id = oc_get_custom_resource(
+ node, 'pv', custom, pv_name)[0]
+ if self.sc.get('provisioner') == "kubernetes.io/glusterfs":
+ self.addCleanup(heketi_volume_delete,
+ self.heketi_client_node,
+ self.heketi_server_url, vol_id,
+ raise_on_error=False)
+ else:
+ self.addCleanup(heketi_blockvolume_delete,
+ self.heketi_client_node,
+ self.heketi_server_url, vol_id,
+ raise_on_error=False)
+ self.addCleanup(oc_delete, node, 'pvc', pvc_name,
+ raise_on_absence=False)
+
+ return pvc_names
+
+ def create_and_wait_for_pvc(self, pvc_size=1,
+ pvc_name_prefix='autotests-pvc', sc_name=None):
+ self.pvc_name = self.create_and_wait_for_pvcs(
+ pvc_size=pvc_size, pvc_name_prefix=pvc_name_prefix, sc_name=sc_name
+ )[0]
+ return self.pvc_name
+
+ def create_dc_with_pvc(self, pvc_name, timeout=300, wait_step=10):
+ dc_name = oc_create_app_dc_with_io(self.ocp_client[0], pvc_name)
+ self.addCleanup(oc_delete, self.ocp_client[0], 'dc', dc_name)
+ self.addCleanup(
+ scale_dc_pod_amount_and_wait, self.ocp_client[0], dc_name, 0)
+ pod_name = get_pod_name_from_dc(self.ocp_client[0], dc_name)
+ wait_for_pod_be_ready(self.ocp_client[0], pod_name,
+ timeout=timeout, wait_step=wait_step)
+ return dc_name, pod_name
def _is_error_or_failure_exists(self):
if hasattr(self, '_outcome'):
@@ -63,3 +305,14 @@ class BaseClass(unittest.TestCase):
"kwargs = %s" % (func, args, kwargs))
g.log.warn(msg)
return super(BaseClass, cls).doClassCleanups()
+
+
+class GlusterBlockBaseClass(BaseClass):
+ """Base class for gluster-block test cases."""
+
+ @classmethod
+ def setUpClass(cls):
+ """Initialize all the variables necessary for test cases."""
+ super(GlusterBlockBaseClass, cls).setUpClass()
+ cls.sc = cls.storage_classes.get(
+ 'storage_class2', cls.storage_classes.get('block_storage_class'))
diff --git a/cns-libs/cnslibs/common/heketi_libs.py b/cns-libs/cnslibs/common/heketi_libs.py
deleted file mode 100644
index 1c86776c..00000000
--- a/cns-libs/cnslibs/common/heketi_libs.py
+++ /dev/null
@@ -1,120 +0,0 @@
-import datetime
-
-from glusto.core import Glusto as g
-
-from cnslibs.common import baseclass
-from cnslibs.common.exceptions import ExecutionError, ConfigError
-from cnslibs.common.heketi_ops import (hello_heketi,
- heketi_volume_delete,
- heketi_blockvolume_delete)
-from cnslibs.common import openshift_ops
-
-
-class HeketiBaseClass(baseclass.BaseClass):
- """
- This class initializes heketi config variables, constructs topology info
- dictionary and check if heketi server is alive.
- """
-
- @classmethod
- def setUpClass(cls):
- """
- setUpClass of HeketiBaseClass
- """
-
- super(HeketiBaseClass, cls).setUpClass()
-
- # Initializes config variables
- openshift_config = g.config.get("cns", g.config.get("openshift"))
- cls.storage_project_name = openshift_config.get(
- 'storage_project_name',
- openshift_config.get('setup', {}).get('cns_project_name'))
-
- cls.ocp_master_nodes = g.config['ocp_servers']['master'].keys()
- cls.ocp_master_node = cls.ocp_master_nodes[0]
-
- heketi_config = openshift_config['heketi_config']
- cls.heketi_dc_name = heketi_config['heketi_dc_name']
- cls.heketi_service_name = heketi_config['heketi_service_name']
- cls.heketi_client_node = heketi_config['heketi_client_node']
- cls.heketi_server_url = heketi_config['heketi_server_url']
- cls.heketi_cli_user = heketi_config['heketi_cli_user']
- cls.heketi_cli_key = heketi_config['heketi_cli_key']
- cls.gluster_servers = g.config['gluster_servers'].keys()
- cls.gluster_servers_info = g.config['gluster_servers']
-
- # Checks if heketi server is alive
- if not hello_heketi(cls.heketi_client_node, cls.heketi_server_url):
- raise ConfigError("Heketi server %s is not alive"
- % cls.heketi_server_url)
-
- # Switch to the storage project
- if not openshift_ops.switch_oc_project(
- cls.ocp_master_node, cls.storage_project_name):
- raise ExecutionError("Failed to switch oc project on node %s"
- % cls.ocp_master_node)
-
- # Have a unique string to recognize the test run for logging
- if 'glustotest_run_id' not in g.config:
- g.config['glustotest_run_id'] = (
- datetime.datetime.now().strftime('%H_%M_%d_%m_%Y'))
- cls.glustotest_run_id = g.config['glustotest_run_id']
- msg = "Setupclass: %s : %s" % (cls.__name__, cls.glustotest_run_id)
- g.log.info(msg)
-
- def setUp(self):
- super(HeketiBaseClass, self).setUp()
- msg = "Starting Test : %s : %s" % (self.id(), self.glustotest_run_id)
- g.log.info(msg)
-
- def delete_volumes(self, volume_ids):
- """
- Delete volumes by their IDs and raise error with list of failures
- Input: (volume_ids) It can be a single volume ID
- or a list of volume IDs
- """
- errored_ids = []
-
- if not isinstance(volume_ids, (list, set, tuple)):
- volume_ids = [volume_ids]
-
- for volume_id in volume_ids:
- out = heketi_volume_delete(
- self.heketi_client_node, self.heketi_server_url, volume_id)
- output_str = 'Volume %s deleted' % volume_id
- if output_str not in out:
- errored_ids.append(volume_id)
-
- if errored_ids:
- raise ExecutionError(
- "Failed to delete following heketi volumes: "
- "%s" % ',\n'.join(errored_ids))
-
- def delete_block_volumes(self, volume_ids):
- """
- Delete block volumes by their volume IDs and raise an error on failures
- Args:
- volume_ids (str) : Volume ID of the block volume
- """
- if not isinstance(volume_ids, (list, set, tuple)):
- volume_ids = [volume_ids]
-
- fail = False
- for volume_id in volume_ids:
- block_out = heketi_blockvolume_delete(
- self.heketi_client_node, self.heketi_server_url, volume_id)
- if block_out is False:
- g.log.error("Block volume delete failed %s " % volume_id)
- fail = True
- self.assertFalse(fail, "Failed to delete blockvolumes")
-
- def tearDown(self):
- super(HeketiBaseClass, self).tearDown()
- msg = "Ending Test: %s : %s" % (self.id(), self.glustotest_run_id)
- g.log.info(msg)
-
- @classmethod
- def tearDownClass(cls):
- super(HeketiBaseClass, cls).tearDownClass()
- msg = "Teardownclass: %s : %s" % (cls.__name__, cls.glustotest_run_id)
- g.log.info(msg)
diff --git a/tests/functional/common/arbiter/test_arbiter.py b/tests/functional/common/arbiter/test_arbiter.py
index 9a701fc5..587a74d3 100644
--- a/tests/functional/common/arbiter/test_arbiter.py
+++ b/tests/functional/common/arbiter/test_arbiter.py
@@ -1,6 +1,6 @@
import ddt
-from cnslibs.cns import cns_baseclass
+from cnslibs.common.baseclass import BaseClass
from cnslibs.common import heketi_ops
from cnslibs.common import heketi_version
from cnslibs.common.openshift_ops import (
@@ -18,7 +18,7 @@ from cnslibs.common.openshift_ops import (
@ddt.ddt
-class TestArbiterVolumeCreateExpandDelete(cns_baseclass.BaseClass):
+class TestArbiterVolumeCreateExpandDelete(BaseClass):
def setUp(self):
super(TestArbiterVolumeCreateExpandDelete, self).setUp()
diff --git a/tests/functional/common/gluster_block/test_restart_gluster_block.py b/tests/functional/common/gluster_block/test_restart_gluster_block.py
index be5c4e9b..90c10dec 100644
--- a/tests/functional/common/gluster_block/test_restart_gluster_block.py
+++ b/tests/functional/common/gluster_block/test_restart_gluster_block.py
@@ -1,4 +1,4 @@
-from cnslibs.common.heketi_libs import HeketiBaseClass
+from cnslibs.common.baseclass import BaseClass
from cnslibs.common.heketi_ops import (
heketi_blockvolume_create,
heketi_blockvolume_delete)
@@ -9,7 +9,7 @@ from cnslibs.common.openshift_ops import (
wait_for_resource_absence)
-class TestRestartGlusterBlockPod(HeketiBaseClass):
+class TestRestartGlusterBlockPod(BaseClass):
def test_restart_gluster_block_provisioner_pod(self):
"""Restart gluster-block provisioner pod
@@ -26,13 +26,13 @@ class TestRestartGlusterBlockPod(HeketiBaseClass):
# restart gluster-block-provisioner-pod
dc_name = "glusterblock-%s-provisioner-dc" % self.storage_project_name
- pod_name = get_pod_name_from_dc(self.ocp_master_node, dc_name)
- oc_delete(self.ocp_master_node, 'pod', pod_name)
- wait_for_resource_absence(self.ocp_master_node, 'pod', pod_name)
+ pod_name = get_pod_name_from_dc(self.ocp_master_node[0], dc_name)
+ oc_delete(self.ocp_master_node[0], 'pod', pod_name)
+ wait_for_resource_absence(self.ocp_master_node[0], 'pod', pod_name)
# new gluster-pod name
- pod_name = get_pod_name_from_dc(self.ocp_master_node, dc_name)
- wait_for_pod_be_ready(self.ocp_master_node, pod_name)
+ pod_name = get_pod_name_from_dc(self.ocp_master_node[0], dc_name)
+ wait_for_pod_be_ready(self.ocp_master_node[0], pod_name)
# create new heketi block volume
vol_info = heketi_blockvolume_create(self.heketi_client_node,
diff --git a/tests/functional/common/gluster_stability/test_gluster_services_restart.py b/tests/functional/common/gluster_stability/test_gluster_services_restart.py
index 2c8603a2..2170815e 100644
--- a/tests/functional/common/gluster_stability/test_gluster_services_restart.py
+++ b/tests/functional/common/gluster_stability/test_gluster_services_restart.py
@@ -6,7 +6,7 @@ from unittest import skip
import ddt
from glusto.core import Glusto as g
-from cnslibs.cns.cns_baseclass import BaseClass
+from cnslibs.common.baseclass import BaseClass
from cnslibs.common.heketi_ops import heketi_blockvolume_list
from cnslibs.common.openshift_ops import (
get_pod_name_from_dc,
diff --git a/tests/functional/common/heketi/heketi_tests/test_disabling_device.py b/tests/functional/common/heketi/heketi_tests/test_disabling_device.py
index 6139f64d..f0e2c5c6 100644
--- a/tests/functional/common/heketi/heketi_tests/test_disabling_device.py
+++ b/tests/functional/common/heketi/heketi_tests/test_disabling_device.py
@@ -2,12 +2,12 @@ from glusto.core import Glusto as g
from glustolibs.gluster.volume_ops import get_volume_info
from cnslibs.common import exceptions
-from cnslibs.common import heketi_libs
+from cnslibs.common import baseclass
from cnslibs.common import heketi_ops
from cnslibs.common import podcmd
-class TestDisableHeketiDevice(heketi_libs.HeketiBaseClass):
+class TestDisableHeketiDevice(baseclass.BaseClass):
@podcmd.GlustoPod()
def test_create_volumes_enabling_and_disabling_heketi_devices(self):
"""Validate enable/disable of heketi device"""
@@ -57,7 +57,9 @@ class TestDisableHeketiDevice(heketi_libs.HeketiBaseClass):
self.assertTrue(out, "Failed to create heketi volume of size 1")
g.log.info("Successfully created heketi volume of size 1")
device_id = out["bricks"][0]["device"]
- self.addCleanup(self.delete_volumes, [out["bricks"][0]["volume"]])
+ self.addCleanup(
+ heketi_ops.heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, out["bricks"][0]["volume"])
# Disable device
g.log.info("Disabling '%s' device" % device_id)
@@ -90,7 +92,8 @@ class TestDisableHeketiDevice(heketi_libs.HeketiBaseClass):
g.log.info("Volume was not created as expected.")
else:
self.addCleanup(
- self.delete_volumes, [out["bricks"][0]["volume"]])
+ heketi_ops.heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, out["bricks"][0]["volume"])
msg = "Volume unexpectedly created. Out: %s" % out
assert False, msg
finally:
@@ -116,7 +119,9 @@ class TestDisableHeketiDevice(heketi_libs.HeketiBaseClass):
out = heketi_ops.heketi_volume_create(
self.heketi_client_node, self.heketi_server_url, 1, json=True)
self.assertTrue(out, "Failed to create volume of size 1")
- self.addCleanup(self.delete_volumes, [out["bricks"][0]["volume"]])
+ self.addCleanup(
+ heketi_ops.heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, out["bricks"][0]["volume"])
g.log.info("Successfully created volume of size 1")
name = out["name"]
diff --git a/tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py b/tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py
index b03e5e30..c1be0d86 100644
--- a/tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py
+++ b/tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py
@@ -3,7 +3,7 @@ from glustolibs.gluster.volume_ops import get_volume_list, get_volume_info
import six
from cnslibs.common.exceptions import ExecutionError
-from cnslibs.common.heketi_libs import HeketiBaseClass
+from cnslibs.common.baseclass import BaseClass
from cnslibs.common.heketi_ops import (heketi_volume_create,
heketi_volume_list,
heketi_volume_info,
@@ -13,11 +13,12 @@ from cnslibs.common.heketi_ops import (heketi_volume_create,
heketi_cluster_delete,
heketi_node_info,
heketi_node_list,
- heketi_node_delete)
+ heketi_node_delete,
+ heketi_volume_delete)
from cnslibs.common import podcmd
-class TestHeketiVolume(HeketiBaseClass):
+class TestHeketiVolume(BaseClass):
"""
Class to test heketi volume create
"""
@@ -45,7 +46,9 @@ class TestHeketiVolume(HeketiBaseClass):
self.volume_size, json=True)
g.log.info("Heketi volume successfully created" % out)
volume_id = out["bricks"][0]["volume"]
- self.addCleanup(self.delete_volumes, volume_id)
+ self.addCleanup(
+ heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, volume_id)
g.log.info("List heketi volumes after volume creation")
h_vol_list = heketi_volume_list(
@@ -82,7 +85,9 @@ class TestHeketiVolume(HeketiBaseClass):
"volume of size %s" % self.volume_size))
g.log.info("Heketi volume successfully created" % out)
volume_id = out["bricks"][0]["volume"]
- self.addCleanup(self.delete_volumes, volume_id)
+ self.addCleanup(
+ heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, volume_id)
g.log.info("Retrieving heketi volume info")
out = heketi_volume_info(
@@ -112,7 +117,9 @@ class TestHeketiVolume(HeketiBaseClass):
"volume of size %s" % self.volume_size))
g.log.info("Heketi volume successfully created" % out)
volume_id = out["bricks"][0]["volume"]
- self.addCleanup(self.delete_volumes, volume_id)
+ self.addCleanup(
+ heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, volume_id)
# List heketi cluster's
g.log.info("Listing heketi cluster list")
@@ -154,7 +161,9 @@ class TestHeketiVolume(HeketiBaseClass):
self.assertTrue(vol, "Failed to create heketi volume.")
g.log.info("Heketi volume successfully created")
volume_id = vol["bricks"][0]["volume"]
- self.addCleanup(self.delete_volumes, volume_id)
+ self.addCleanup(
+ heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, volume_id)
# Pick up suitable node
node_ids = heketi_node_list(self.heketi_client_node, heketi_url)
diff --git a/tests/functional/common/heketi/heketi_tests/test_node_enable_disable.py b/tests/functional/common/heketi/heketi_tests/test_node_enable_disable.py
index 9fac9e01..b8ce2c71 100644
--- a/tests/functional/common/heketi/heketi_tests/test_node_enable_disable.py
+++ b/tests/functional/common/heketi/heketi_tests/test_node_enable_disable.py
@@ -1,16 +1,18 @@
"""Test cases to disable and enable node in heketi."""
import json
-from cnslibs.common.heketi_libs import HeketiBaseClass
+from cnslibs.common.baseclass import BaseClass
from cnslibs.common.heketi_ops import (heketi_node_enable,
heketi_node_info,
heketi_node_disable,
heketi_node_list,
- heketi_volume_create)
+ heketi_volume_create,
+ heketi_volume_delete
+ )
from glusto.core import Glusto as g
-class TestHeketiNodeState(HeketiBaseClass):
+class TestHeketiNodeState(BaseClass):
"""Test node enable and disable functionality."""
def enable_node(self, node_id):
@@ -107,9 +109,9 @@ class TestHeketiNodeState(HeketiBaseClass):
vol_info = heketi_volume_create(self.heketi_client_node,
self.heketi_server_url, vol_size,
json=True)
- self.assertTrue(vol_info, (
- "Failed to create heketi volume of size %d" % vol_size))
- self.addCleanup(self.delete_volumes, vol_info['id'])
+ self.addCleanup(
+ heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, vol_info['id'])
node_id = online_hosts[0]['id']
g.log.info("going to disable node id %s", node_id)
@@ -122,7 +124,9 @@ class TestHeketiNodeState(HeketiBaseClass):
vol_size, raw_cli_output=True)
if ret == 0:
out_json = json.loads(out)
- self.addCleanup(self.delete_volumes, out_json["id"])
+ self.addCleanup(
+ heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, out_json["id"])
self.assertNotEqual(ret, 0,
("Volume creation did not fail ret- %s "
"out- %s err- %s" % (ret, out, err)))
@@ -135,6 +139,6 @@ class TestHeketiNodeState(HeketiBaseClass):
vol_info = heketi_volume_create(self.heketi_client_node,
self.heketi_server_url, vol_size,
json=True)
- self.assertTrue(vol_info, (
- "Failed to create heketi volume of size %d" % vol_size))
- self.addCleanup(self.delete_volumes, vol_info['id'])
+ self.addCleanup(
+ heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, vol_info['id'])
diff --git a/tests/functional/common/heketi/heketi_tests/test_node_info.py b/tests/functional/common/heketi/heketi_tests/test_node_info.py
index 3f956d62..ad60b844 100644
--- a/tests/functional/common/heketi/heketi_tests/test_node_info.py
+++ b/tests/functional/common/heketi/heketi_tests/test_node_info.py
@@ -2,11 +2,11 @@ from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.peer_ops import get_pool_list
-from cnslibs.common.heketi_libs import HeketiBaseClass
+from cnslibs.common.baseclass import BaseClass
from cnslibs.common import heketi_ops, podcmd
-class TestHeketiVolume(HeketiBaseClass):
+class TestHeketiVolume(BaseClass):
"""
Class to test heketi volume create
"""
diff --git a/tests/functional/common/heketi/test_block_volumes_heketi.py b/tests/functional/common/heketi/test_block_volumes_heketi.py
index 1a460a96..b75f58ac 100644
--- a/tests/functional/common/heketi/test_block_volumes_heketi.py
+++ b/tests/functional/common/heketi/test_block_volumes_heketi.py
@@ -5,10 +5,10 @@ from cnslibs.common.heketi_ops import (heketi_blockvolume_create,
heketi_volume_create,
heketi_volume_delete
)
-from cnslibs.common.heketi_libs import HeketiBaseClass
+from cnslibs.common.baseclass import BaseClass
-class TestBlockVolumeOps(HeketiBaseClass):
+class TestBlockVolumeOps(BaseClass):
"""Class to test heketi block volume deletion with and without block
volumes existing, heketi block volume list, heketi block volume info
and heketi block volume creation with name and block volumes creation
@@ -22,49 +22,44 @@ class TestBlockVolumeOps(HeketiBaseClass):
block_host_create_info = heketi_volume_create(
self.heketi_client_node, self.heketi_server_url, 5,
json=True, block=True)
- self.assertNotEqual(block_host_create_info, False,
- "Block host volume creation failed")
- block_hosting_vol_id = block_host_create_info["id"]
- self.addCleanup(self.delete_volumes, block_hosting_vol_id)
+ self.addCleanup(
+ heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, block_host_create_info["id"])
+
block_vol = heketi_blockvolume_create(
self.heketi_client_node, self.heketi_server_url, 1, json=True)
- self.assertNotEqual(block_vol, False, "Block volume creation failed")
- self.addCleanup(self.delete_block_volumes, block_vol["id"])
+ self.addCleanup(
+ heketi_blockvolume_delete, self.heketi_client_node,
+ self.heketi_server_url, block_vol["id"])
def test_block_host_volume_delete_without_block_volumes(self):
"""Validate deletion of empty block hosting volume"""
block_host_create_info = heketi_volume_create(
self.heketi_client_node, self.heketi_server_url, 1, json=True,
block=True)
- self.assertNotEqual(block_host_create_info, False,
- "Block host volume creation failed")
+
block_hosting_vol_id = block_host_create_info["id"]
- self.addCleanup(heketi_volume_delete, self.heketi_client_node,
- self.heketi_server_url, block_hosting_vol_id,
- raise_on_error=False)
- block_host_delete_output = heketi_volume_delete(
+ self.addCleanup(
+ heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, block_hosting_vol_id, raise_on_error=False)
+
+ heketi_volume_delete(
self.heketi_client_node, self.heketi_server_url,
block_hosting_vol_id, json=True)
- self.assertNotEqual(
- block_host_delete_output, False,
- "Block host volume delete failed, ID: %s" % block_hosting_vol_id)
def test_block_volume_delete(self):
"""Validate deletion of gluster-block volume and capacity of used pool
"""
block_vol = heketi_blockvolume_create(
self.heketi_client_node, self.heketi_server_url, 1, json=True)
- self.assertNotEqual(block_vol, False,
- "Block volume creation has failed")
- self.addCleanup(heketi_blockvolume_delete, self.heketi_client_node,
- self.heketi_server_url, block_vol["id"],
- raise_on_error=False)
- block_delete_output = heketi_blockvolume_delete(
+ self.addCleanup(
+ heketi_blockvolume_delete, self.heketi_client_node,
+ self.heketi_server_url, block_vol["id"], raise_on_error=False)
+
+ heketi_blockvolume_delete(
self.heketi_client_node, self.heketi_server_url,
block_vol["id"], json=True)
- self.assertNotEqual(block_delete_output, False,
- "deletion of block volume has failed, ID: %s"
- % block_vol["id"])
+
volume_list = heketi_blockvolume_list(
self.heketi_client_node, self.heketi_server_url, json=True)
self.assertNotIn(block_vol["id"], volume_list["blockvolumes"],
@@ -76,14 +71,16 @@ class TestBlockVolumeOps(HeketiBaseClass):
created_vol_ids = []
for count in range(3):
block_vol = heketi_blockvolume_create(
- self.heketi_client_node, self.heketi_server_url,
- 1, json=True)
- self.assertNotEqual(block_vol, False,
- "Block volume creation has failed")
- self.addCleanup(self.delete_block_volumes, block_vol["id"])
+ self.heketi_client_node, self.heketi_server_url, 1, json=True)
+ self.addCleanup(
+ heketi_blockvolume_delete, self.heketi_client_node,
+ self.heketi_server_url, block_vol["id"])
+
created_vol_ids.append(block_vol["id"])
+
volumes = heketi_blockvolume_list(
self.heketi_client_node, self.heketi_server_url, json=True)
+
existing_vol_ids = volumes.values()[0]
for vol_id in created_vol_ids:
self.assertIn(vol_id, existing_vol_ids,
diff --git a/tests/functional/common/heketi/test_check_entries.py b/tests/functional/common/heketi/test_check_entries.py
index e8479226..12fc21a2 100644
--- a/tests/functional/common/heketi/test_check_entries.py
+++ b/tests/functional/common/heketi/test_check_entries.py
@@ -1,17 +1,17 @@
from glusto.core import Glusto as g
-from cnslibs.common.heketi_libs import HeketiBaseClass
+from cnslibs.common.baseclass import BaseClass
from cnslibs.common.heketi_ops import (heketi_volume_create,
heketi_volume_delete)
from cnslibs.common.openshift_ops import get_ocp_gluster_pod_names
-class TestHeketiVolume(HeketiBaseClass):
+class TestHeketiVolume(BaseClass):
"""Check volume bricks presence in fstab files on Gluster PODs."""
def _find_bricks_in_fstab_files(self, brick_paths, present):
"""Make sure that vol brick paths either exist or not in fstab file."""
- oc_node = self.ocp_master_nodes[0]
+ oc_node = self.ocp_master_node[0]
gluster_pods = get_ocp_gluster_pod_names(oc_node)
get_fstab_entries_cmd = "oc exec %s -- cat /var/lib/heketi/fstab"
fstab_files_data = ''
diff --git a/tests/functional/common/heketi/test_create_distributed_replica_heketi_volume.py b/tests/functional/common/heketi/test_create_distributed_replica_heketi_volume.py
index c79ae5ee..93ef0593 100644
--- a/tests/functional/common/heketi/test_create_distributed_replica_heketi_volume.py
+++ b/tests/functional/common/heketi/test_create_distributed_replica_heketi_volume.py
@@ -5,7 +5,7 @@ from glusto.core import Glusto as g
from glustolibs.gluster.volume_ops import get_volume_list, get_volume_info
from cnslibs.common import exceptions
-from cnslibs.common.heketi_libs import HeketiBaseClass
+from cnslibs.common.baseclass import BaseClass
from cnslibs.common.heketi_ops import (heketi_node_list,
heketi_node_enable,
heketi_node_disable,
@@ -18,7 +18,7 @@ from cnslibs.common.heketi_ops import (heketi_node_list,
from cnslibs.common import podcmd
-class TestHeketiVolume(HeketiBaseClass):
+class TestHeketiVolume(BaseClass):
def setUp(self):
super(TestHeketiVolume, self).setUp()
diff --git a/tests/functional/common/heketi/test_device_info.py b/tests/functional/common/heketi/test_device_info.py
index d1214537..a48fd814 100644
--- a/tests/functional/common/heketi/test_device_info.py
+++ b/tests/functional/common/heketi/test_device_info.py
@@ -1,8 +1,8 @@
-from cnslibs.common import heketi_libs
+from cnslibs.common.baseclass import BaseClass
from cnslibs.common import heketi_ops
-class TestHeketiDeviceInfo(heketi_libs.HeketiBaseClass):
+class TestHeketiDeviceInfo(BaseClass):
def test_heketi_devices_info_verification(self):
"""Validate whether device related information is displayed"""
diff --git a/tests/functional/common/heketi/test_heketi_device_operations.py b/tests/functional/common/heketi/test_heketi_device_operations.py
index 0ad81f48..8bd87089 100644
--- a/tests/functional/common/heketi/test_heketi_device_operations.py
+++ b/tests/functional/common/heketi/test_heketi_device_operations.py
@@ -4,7 +4,7 @@ import ddt
from glusto.core import Glusto as g
from cnslibs.common.exceptions import ExecutionError
-from cnslibs.common.heketi_libs import HeketiBaseClass
+from cnslibs.common.baseclass import BaseClass
from cnslibs.common.heketi_ops import (heketi_node_enable,
heketi_node_info,
heketi_node_disable,
@@ -16,11 +16,12 @@ from cnslibs.common.heketi_ops import (heketi_node_enable,
heketi_device_remove,
heketi_device_info,
heketi_device_enable,
- heketi_topology_info)
+ heketi_topology_info,
+ heketi_volume_delete)
@ddt.ddt
-class TestHeketiDeviceOperations(HeketiBaseClass):
+class TestHeketiDeviceOperations(BaseClass):
"""Test Heketi device enable/disable and remove functionality."""
def check_any_of_bricks_present_in_device(self, bricks, device_id):
@@ -124,7 +125,9 @@ class TestHeketiDeviceOperations(HeketiBaseClass):
json=True)
self.assertTrue(vol_info, (
"Failed to create heketi volume of size %d" % vol_size))
- self.addCleanup(self.delete_volumes, vol_info['id'])
+ self.addCleanup(
+ heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, vol_info['id'])
# Check that one of volume's bricks is present on the device
present = self.check_any_of_bricks_present_in_device(
@@ -144,7 +147,9 @@ class TestHeketiDeviceOperations(HeketiBaseClass):
self.heketi_client_node, self.heketi_server_url,
vol_size, json=True, raw_cli_output=True)
if ret == 0:
- self.addCleanup(self.delete_volumes, json.loads(out)["id"])
+ self.addCleanup(
+ heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, json.loads(out)["id"])
self.assertNotEqual(ret, 0,
("Volume creation did not fail. ret- %s "
"out- %s err- %s" % (ret, out, err)))
@@ -159,9 +164,9 @@ class TestHeketiDeviceOperations(HeketiBaseClass):
vol_info = heketi_volume_create(self.heketi_client_node,
self.heketi_server_url, vol_size,
json=True)
- self.assertTrue(vol_info, (
- "Failed to create heketi volume of size %d" % vol_size))
- self.addCleanup(self.delete_volumes, vol_info['id'])
+ self.addCleanup(
+ heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, vol_info['id'])
# Check that one of volume's bricks is present on the device
present = self.check_any_of_bricks_present_in_device(
@@ -226,9 +231,9 @@ class TestHeketiDeviceOperations(HeketiBaseClass):
vol_info = heketi_volume_create(
self.heketi_client_node, self.heketi_server_url, vol_size,
json=True)
- self.assertTrue(vol_info, (
- "Failed to create heketi volume of size %d" % vol_size))
- self.addCleanup(self.delete_volumes, vol_info['id'])
+ self.addCleanup(
+ heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, vol_info['id'])
# Add extra device, then remember it's ID and size
heketi_device_add(self.heketi_client_node, self.heketi_server_url,
@@ -310,9 +315,9 @@ class TestHeketiDeviceOperations(HeketiBaseClass):
vol_info = heketi_volume_create(self.heketi_client_node,
self.heketi_server_url, vol_size,
json=True)
- self.assertTrue(vol_info, (
- "Failed to create heketi volume of size %d" % vol_size))
- self.addCleanup(self.delete_volumes, vol_info['id'])
+ self.addCleanup(
+ heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, vol_info['id'])
if delete_device:
return
@@ -391,7 +396,9 @@ class TestHeketiDeviceOperations(HeketiBaseClass):
vol_size_gb -= 1
heketi_vol = heketi_volume_create(
heketi_node, heketi_url, vol_size_gb, json=True)
- self.addCleanup(self.delete_volumes, heketi_vol["bricks"][0]["volume"])
+ self.addCleanup(
+ heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, heketi_vol["bricks"][0]["volume"])
# Try to 'remove' bigger Heketi disk expecting error,
# because there is no space on smaller disk to relocate bricks to
diff --git a/tests/functional/common/heketi/test_heketi_metrics.py b/tests/functional/common/heketi/test_heketi_metrics.py
index 701f7d81..4653caee 100644
--- a/tests/functional/common/heketi/test_heketi_metrics.py
+++ b/tests/functional/common/heketi/test_heketi_metrics.py
@@ -1,5 +1,5 @@
from cnslibs.common import exceptions
-from cnslibs.common.heketi_libs import HeketiBaseClass
+from cnslibs.common.baseclass import BaseClass
from cnslibs.common.heketi_ops import (
get_heketi_metrics,
heketi_cluster_info,
@@ -17,7 +17,7 @@ from cnslibs.common.openshift_ops import (
)
-class TestHeketiMetrics(HeketiBaseClass):
+class TestHeketiMetrics(BaseClass):
def setUp(self):
self.node = self.ocp_master_node[0]
@@ -177,9 +177,9 @@ class TestHeketiMetrics(HeketiBaseClass):
def test_heketi_metrics_heketipod_failure(self):
"""Validate heketi metrics after heketi pod failure"""
scale_dc_pod_amount_and_wait(
- self.ocp_master_node, self.heketi_dc_name, pod_amount=0)
+ self.ocp_master_node[0], self.heketi_dc_name, pod_amount=0)
self.addCleanup(
- scale_dc_pod_amount_and_wait, self.ocp_master_node,
+ scale_dc_pod_amount_and_wait, self.ocp_master_node[0],
self.heketi_dc_name, pod_amount=1)
# verify that metrics is not accessable when heketi pod is down
@@ -190,11 +190,11 @@ class TestHeketiMetrics(HeketiBaseClass):
prometheus_format=True)
scale_dc_pod_amount_and_wait(
- self.ocp_master_node, self.heketi_dc_name, pod_amount=1)
+ self.ocp_master_node[0], self.heketi_dc_name, pod_amount=1)
pod_name = get_pod_name_from_dc(
- self.ocp_master_node, self.heketi_dc_name, self.heketi_dc_name)
- wait_for_pod_be_ready(self.ocp_master_node, pod_name, wait_step=5)
+ self.ocp_master_node[0], self.heketi_dc_name, self.heketi_dc_name)
+ wait_for_pod_be_ready(self.ocp_master_node[0], pod_name, wait_step=5)
for i in range(3):
vol = heketi_volume_create(
diff --git a/tests/functional/common/heketi/test_heketi_volume_operations.py b/tests/functional/common/heketi/test_heketi_volume_operations.py
index f82521c5..d7b9aa18 100644
--- a/tests/functional/common/heketi/test_heketi_volume_operations.py
+++ b/tests/functional/common/heketi/test_heketi_volume_operations.py
@@ -2,10 +2,10 @@ from cnslibs.common.heketi_ops import (heketi_volume_delete,
heketi_volume_create,
heketi_volume_expand,
heketi_volume_info)
-from cnslibs.common.heketi_libs import HeketiBaseClass
+from cnslibs.common.baseclass import BaseClass
-class TestHeketiVolumeOperations(HeketiBaseClass):
+class TestHeketiVolumeOperations(BaseClass):
"""
Class to test heketi volume operations - create, expand
"""
diff --git a/tests/functional/common/heketi/test_server_state_examine_gluster.py b/tests/functional/common/heketi/test_server_state_examine_gluster.py
index 5b904e8f..f74366ed 100644
--- a/tests/functional/common/heketi/test_server_state_examine_gluster.py
+++ b/tests/functional/common/heketi/test_server_state_examine_gluster.py
@@ -1,10 +1,10 @@
-from cnslibs.cns import cns_baseclass
+from cnslibs.common.baseclass import BaseClass
from cnslibs.common import heketi_ops
from cnslibs.common import heketi_version
from cnslibs.common import openshift_ops
-class TestHeketiServerStateExamineGluster(cns_baseclass.BaseClass):
+class TestHeketiServerStateExamineGluster(BaseClass):
def setUp(self):
self.node = self.ocp_master_node[0]
diff --git a/tests/functional/common/heketi/test_volume_creation.py b/tests/functional/common/heketi/test_volume_creation.py
index 5820b789..86618505 100644
--- a/tests/functional/common/heketi/test_volume_creation.py
+++ b/tests/functional/common/heketi/test_volume_creation.py
@@ -2,12 +2,12 @@ from glusto.core import Glusto as g
from glustolibs.gluster import volume_ops
from cnslibs.common import exceptions
-from cnslibs.common import heketi_libs
+from cnslibs.common.baseclass import BaseClass
from cnslibs.common import heketi_ops
from cnslibs.common import podcmd
-class TestVolumeCreationTestCases(heketi_libs.HeketiBaseClass):
+class TestVolumeCreationTestCases(BaseClass):
"""
Class for volume creation related test cases
"""
@@ -29,7 +29,9 @@ class TestVolumeCreationTestCases(heketi_libs.HeketiBaseClass):
volume_name = output_dict["name"]
volume_id = output_dict["id"]
- self.addCleanup(self.delete_volumes, volume_id)
+ self.addCleanup(
+ heketi_ops.heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, volume_id)
self.assertEqual(output_dict["durability"]
["replicate"]["replica"], 3,
@@ -127,7 +129,9 @@ class TestVolumeCreationTestCases(heketi_libs.HeketiBaseClass):
# Create first small volume
vol = heketi_ops.heketi_volume_create(node, server_url, 1, json=True)
- self.addCleanup(self.delete_volumes, vol["id"])
+ self.addCleanup(
+ heketi_ops.heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, vol["id"])
# Try to create second volume getting "no free space" error
try:
@@ -137,7 +141,8 @@ class TestVolumeCreationTestCases(heketi_libs.HeketiBaseClass):
g.log.info("Volume was not created as expected.")
else:
self.addCleanup(
- self.delete_volumes, vol_fail["bricks"][0]["volume"])
+ heketi_ops.heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, vol_fail["bricks"][0]["volume"])
self.assertFalse(
vol_fail,
"Volume should have not been created. Out: %s" % vol_fail)
diff --git a/tests/functional/common/heketi/test_volume_deletion.py b/tests/functional/common/heketi/test_volume_deletion.py
index b1be795b..6f279899 100644
--- a/tests/functional/common/heketi/test_volume_deletion.py
+++ b/tests/functional/common/heketi/test_volume_deletion.py
@@ -1,11 +1,11 @@
from __future__ import division
from cnslibs.common.exceptions import ExecutionError
-from cnslibs.common.heketi_libs import HeketiBaseClass
+from cnslibs.common.baseclass import BaseClass
from cnslibs.common import heketi_ops
-class TestVolumeDeleteTestCases(HeketiBaseClass):
+class TestVolumeDeleteTestCases(BaseClass):
"""
Class for volume deletion related test cases
@@ -40,18 +40,12 @@ class TestVolumeDeleteTestCases(HeketiBaseClass):
self.heketi_client_node,
self.heketi_server_url, 10, json=True)
- self.assertNotEqual(creation_output_dict, False,
- "Volume creation failed")
-
volume_id = creation_output_dict["name"].strip().split("_")[1]
free_space_after_creation = self.get_free_space_summary_devices()
- deletion_output = heketi_ops.heketi_volume_delete(
+ heketi_ops.heketi_volume_delete(
self.heketi_client_node, self.heketi_server_url, volume_id)
- self.assertNotEqual(deletion_output, False,
- "Deletion of volume failed, id: %s" % volume_id)
-
free_space_after_deletion = self.get_free_space_summary_devices()
self.assertTrue(
@@ -62,7 +56,6 @@ class TestVolumeDeleteTestCases(HeketiBaseClass):
"""
Method to test heketidb volume deletion via heketi-cli
"""
- volume_id_list = []
heketidbexists = False
msg = "Error: Cannot delete volume containing the Heketi database"
@@ -70,18 +63,15 @@ class TestVolumeDeleteTestCases(HeketiBaseClass):
volume_info = heketi_ops.heketi_volume_create(
self.heketi_client_node, self.heketi_server_url,
10, json=True)
- self.assertNotEqual(volume_info, False, "Volume creation failed")
- volume_id_list.append(volume_info["id"])
- self.addCleanup(self.delete_volumes, volume_id_list)
+ self.addCleanup(
+ heketi_ops.heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, volume_info["id"])
volume_list_info = heketi_ops.heketi_volume_list(
self.heketi_client_node,
self.heketi_server_url, json=True)
- self.assertNotEqual(volume_list_info, False,
- "Heketi volume list command failed")
-
if volume_list_info["volumes"] == []:
raise ExecutionError("Heketi volume list empty")
diff --git a/tests/functional/common/heketi/test_volume_expansion_and_devices.py b/tests/functional/common/heketi/test_volume_expansion_and_devices.py
index 279be053..5e189e49 100644
--- a/tests/functional/common/heketi/test_volume_expansion_and_devices.py
+++ b/tests/functional/common/heketi/test_volume_expansion_and_devices.py
@@ -5,11 +5,11 @@ from glusto.core import Glusto as g
from glustolibs.gluster import volume_ops, rebalance_ops
from cnslibs.common.exceptions import ExecutionError
-from cnslibs.common.heketi_libs import HeketiBaseClass
+from cnslibs.common.baseclass import BaseClass
from cnslibs.common import heketi_ops, podcmd
-class TestVolumeExpansionAndDevicesTestCases(HeketiBaseClass):
+class TestVolumeExpansionAndDevicesTestCases(BaseClass):
"""
Class for volume expansion and devices addition related test cases
"""
diff --git a/tests/functional/common/heketi/test_volume_multi_req.py b/tests/functional/common/heketi/test_volume_multi_req.py
index 5b72cc91..f6b0fcf6 100644
--- a/tests/functional/common/heketi/test_volume_multi_req.py
+++ b/tests/functional/common/heketi/test_volume_multi_req.py
@@ -11,7 +11,7 @@ import yaml
from glusto.core import Glusto as g
-from cnslibs.common.heketi_libs import HeketiBaseClass
+from cnslibs.common.baseclass import BaseClass
from cnslibs.common.heketi_ops import (
heketi_volume_list)
from cnslibs.common.naming import (
@@ -192,7 +192,7 @@ def _heketi_name_id_map(vols):
@ddt.ddt
-class TestVolumeMultiReq(HeketiBaseClass):
+class TestVolumeMultiReq(BaseClass):
def setUp(self):
super(TestVolumeMultiReq, self).setUp()
self.volcount = self._count_vols()
diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
index bca0bf66..4f045356 100644
--- a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
+++ b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
@@ -1,6 +1,6 @@
from unittest import skip
-from cnslibs.cns.cns_baseclass import GlusterBlockBaseClass
+from cnslibs.common.baseclass import GlusterBlockBaseClass
from cnslibs.common.exceptions import ExecutionError
from cnslibs.common.openshift_ops import (
get_gluster_pod_names_by_pvc_name,
diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
index 81329e08..3367bab2 100644
--- a/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
+++ b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
@@ -1,7 +1,7 @@
import time
from unittest import skip
-from cnslibs.cns.cns_baseclass import BaseClass
+from cnslibs.common.baseclass import BaseClass
from cnslibs.common.exceptions import ExecutionError
from cnslibs.common.heketi_ops import (
verify_volume_name_prefix)
diff --git a/tests/functional/common/provisioning/test_pv_resize.py b/tests/functional/common/provisioning/test_pv_resize.py
index 99c1e451..9490ce61 100644
--- a/tests/functional/common/provisioning/test_pv_resize.py
+++ b/tests/functional/common/provisioning/test_pv_resize.py
@@ -16,7 +16,7 @@ from cnslibs.common.openshift_ops import (
wait_for_pod_be_ready,
wait_for_resource_absence)
from cnslibs.common.openshift_version import get_openshift_version
-from cnslibs.cns.cns_baseclass import BaseClass
+from cnslibs.common.baseclass import BaseClass
from cnslibs.common.exceptions import ExecutionError
from glusto.core import Glusto as g
diff --git a/tests/functional/common/provisioning/test_storage_class_cases.py b/tests/functional/common/provisioning/test_storage_class_cases.py
index e9dc8dbe..f7ccd65b 100644
--- a/tests/functional/common/provisioning/test_storage_class_cases.py
+++ b/tests/functional/common/provisioning/test_storage_class_cases.py
@@ -3,7 +3,7 @@ from unittest import skip
import ddt
from glusto.core import Glusto as g
-from cnslibs.cns import cns_baseclass
+from cnslibs.common.baseclass import BaseClass
from cnslibs.common.cns_libs import validate_multipath_pod
from cnslibs.common.openshift_ops import (
get_amount_of_gluster_nodes,
@@ -22,7 +22,7 @@ from cnslibs.common.openshift_ops import (
@ddt.ddt
-class TestStorageClassCases(cns_baseclass.BaseClass):
+class TestStorageClassCases(BaseClass):
def create_sc_with_parameter(self, vol_type, success=False, parameter={}):
"""creates storage class, pvc and validates event
diff --git a/tests/functional/common/test_heketi_restart.py b/tests/functional/common/test_heketi_restart.py
index 6e9f2115..a06bf9c6 100644
--- a/tests/functional/common/test_heketi_restart.py
+++ b/tests/functional/common/test_heketi_restart.py
@@ -1,10 +1,12 @@
from jsondiff import diff
-from cnslibs.common.heketi_libs import HeketiBaseClass
+from cnslibs.common.baseclass import BaseClass
from cnslibs.common.heketi_ops import (
+ heketi_topology_info,
hello_heketi,
heketi_volume_create,
- heketi_topology_info)
+ heketi_volume_delete
+)
from cnslibs.common.openshift_ops import (
get_pod_name_from_dc,
oc_delete,
@@ -12,7 +14,7 @@ from cnslibs.common.openshift_ops import (
wait_for_resource_absence)
-class TestRestartHeketi(HeketiBaseClass):
+class TestRestartHeketi(BaseClass):
def test_restart_heketi_pod(self):
"""Validate restarting heketi pod"""
@@ -22,24 +24,26 @@ class TestRestartHeketi(HeketiBaseClass):
self.heketi_server_url,
size=1, json=True)
self.assertTrue(vol_info, "Failed to create heketi volume of size 1")
- self.addCleanup(self.delete_volumes, vol_info['id'])
+ self.addCleanup(
+ heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, vol_info['id'], raise_on_error=False)
topo_info = heketi_topology_info(self.heketi_client_node,
self.heketi_server_url,
json=True)
# get heketi-pod name
- heketi_pod_name = get_pod_name_from_dc(self.ocp_master_node,
+ heketi_pod_name = get_pod_name_from_dc(self.ocp_master_node[0],
self.heketi_dc_name)
# delete heketi-pod (it restarts the pod)
- oc_delete(self.ocp_master_node, 'pod', heketi_pod_name)
- wait_for_resource_absence(self.ocp_master_node,
+ oc_delete(self.ocp_master_node[0], 'pod', heketi_pod_name)
+ wait_for_resource_absence(self.ocp_master_node[0],
'pod', heketi_pod_name)
# get new heketi-pod name
- heketi_pod_name = get_pod_name_from_dc(self.ocp_master_node,
+ heketi_pod_name = get_pod_name_from_dc(self.ocp_master_node[0],
self.heketi_dc_name)
- wait_for_pod_be_ready(self.ocp_master_node,
+ wait_for_pod_be_ready(self.ocp_master_node[0],
heketi_pod_name)
# check heketi server is running
@@ -60,4 +64,5 @@ class TestRestartHeketi(HeketiBaseClass):
self.heketi_server_url,
size=2, json=True)
self.assertTrue(vol_info, "Failed to create heketi volume of size 20")
- self.delete_volumes(vol_info['id'])
+ heketi_volume_delete(
+ self.heketi_client_node, self.heketi_server_url, vol_info['id'])
diff --git a/tests/functional/common/test_node_restart.py b/tests/functional/common/test_node_restart.py
index fc8bec07..6a0969ee 100644
--- a/tests/functional/common/test_node_restart.py
+++ b/tests/functional/common/test_node_restart.py
@@ -2,7 +2,7 @@
import time
from unittest import skip
-from cnslibs.cns.cns_baseclass import BaseClass
+from cnslibs.common.baseclass import BaseClass
from cnslibs.common.openshift_ops import (
check_service_status_on_pod,
get_ocp_gluster_pod_names,