diff options
11 files changed, 80 insertions, 270 deletions
diff --git a/cns-libs/cnslibs/cns/cns_baseclass.py b/cns-libs/cnslibs/cns/cns_baseclass.py index 234511aa..068c25c3 100644 --- a/cns-libs/cnslibs/cns/cns_baseclass.py +++ b/cns-libs/cnslibs/cns/cns_baseclass.py @@ -49,7 +49,6 @@ class CnsBaseClass(unittest.TestCase): cls.cns_username = g.config['cns']['setup']['cns_username'] cls.cns_password = g.config['cns']['setup']['cns_password'] cls.cns_project_name = g.config['cns']['setup']['cns_project_name'] - cls.deployment_type = g.config['cns']['deployment_type'] # Initializes heketi config variables heketi_config = g.config['cns']['heketi_config'] diff --git a/cns-libs/cnslibs/common/heketi_libs.py b/cns-libs/cnslibs/common/heketi_libs.py index c6529af4..7ac5c6a6 100644 --- a/cns-libs/cnslibs/common/heketi_libs.py +++ b/cns-libs/cnslibs/common/heketi_libs.py @@ -8,8 +8,7 @@ from cnslibs.common.exceptions import ExecutionError, ConfigError from cnslibs.common.heketi_ops import (hello_heketi, heketi_volume_delete, heketi_blockvolume_delete) -from cnslibs.common.openshift_ops import (oc_login, switch_oc_project, - get_ocp_gluster_pod_names) +from cnslibs.common import openshift_ops class HeketiBaseClass(unittest.TestCase): @@ -33,8 +32,6 @@ class HeketiBaseClass(unittest.TestCase): cls.ocp_master_nodes = g.config['ocp_servers']['master'].keys() cls.ocp_master_node = cls.ocp_master_nodes[0] - cls.deployment_type = g.config['cns']['deployment_type'] - heketi_config = g.config['cns']['heketi_config'] cls.heketi_dc_name = heketi_config['heketi_dc_name'] cls.heketi_service_name = heketi_config['heketi_service_name'] @@ -65,19 +62,15 @@ class HeketiBaseClass(unittest.TestCase): raise ConfigError("Heketi server %s is not alive" % cls.heketi_server_url) - if cls.deployment_type == "cns": - if not oc_login(cls.ocp_master_node, cls.cns_username, - cls.cns_password): - raise ExecutionError("Failed to do oc login on node %s" - % cls.ocp_master_node) - - if not switch_oc_project(cls.ocp_master_node, - cls.cns_project_name): - raise 
ExecutionError("Failed to switch oc project on node %s" - % cls.ocp_master_node) - - cls.gluster_pods = get_ocp_gluster_pod_names(cls.ocp_master_node) - g.pod_name = cls.gluster_pods[0] + # Switch to the storage project + if not openshift_ops.oc_login( + cls.ocp_master_node, cls.cns_username, cls.cns_password): + raise ExecutionError("Failed to do oc login on node %s" + % cls.ocp_master_node) + if not openshift_ops.switch_oc_project( + cls.ocp_master_node, cls.cns_project_name): + raise ExecutionError("Failed to switch oc project on node %s" + % cls.ocp_master_node) # Have a unique string to recognize the test run for logging if 'glustotest_run_id' not in g.config: diff --git a/cns-libs/cnslibs/common/openshift_ops.py b/cns-libs/cnslibs/common/openshift_ops.py index cc84098e..d9511980 100644 --- a/cns-libs/cnslibs/common/openshift_ops.py +++ b/cns-libs/cnslibs/common/openshift_ops.py @@ -10,17 +10,12 @@ import types from glusto.core import Glusto as g from glustolibs.gluster import volume_ops -from glustolibs.gluster.brick_libs import ( - are_bricks_online, - get_all_bricks, - get_online_bricks_list) import mock import yaml from cnslibs.common import command from cnslibs.common import exceptions from cnslibs.common import openshift_version -from cnslibs.common import podcmd from cnslibs.common import utils from cnslibs.common import waiter from cnslibs.common.heketi_ops import ( @@ -1235,96 +1230,6 @@ def get_vol_names_from_pv(hostname, pv_name): return vol_dict -@podcmd.GlustoPod() -def verify_brick_count_gluster_vol(hostname, brick_count, - gluster_vol): - ''' - Verify brick count for gluster volume - Args: - hostname (str): hostname on which we want - to check brick count - brick_count (int): integer value to verify - gluster_vol (str): gluster vol name - Returns: - bool: True, if successful - otherwise raise Exception - ''' - gluster_pod = get_ocp_gluster_pod_names(hostname)[1] - p = podcmd.Pod(hostname, gluster_pod) - out = get_online_bricks_list(p, 
gluster_vol) - if brick_count == len(out): - g.log.info("successfully verified brick count %s " - "for vol %s" % (brick_count, gluster_vol)) - return True - err_msg = ("verification of brick count %s for vol %s" - "failed, count found %s" % ( - brick_count, gluster_vol, len(out))) - raise AssertionError(err_msg) - - -@podcmd.GlustoPod() -def verify_brick_status_online_gluster_vol(hostname, - gluster_vol): - ''' - Verify if all the bricks are online for the - gluster volume - Args: - hostname (str): hostname on which we want - to check brick status - gluster_vol (str): gluster vol name - Returns: - bool: True, if successful - otherwise raise Exception - ''' - gluster_pod = get_ocp_gluster_pod_names(hostname)[1] - p = podcmd.Pod(hostname, gluster_pod) - brick_list = get_all_bricks(p, gluster_vol) - if brick_list is None: - error_msg = ("failed to get brick list for vol" - " %s" % gluster_vol) - g.log.error(error_msg) - raise exceptions.ExecutionError(error_msg) - out = are_bricks_online(p, gluster_vol, brick_list) - if out: - g.log.info("verification of brick status as online" - " for gluster vol %s successful" - % gluster_vol) - return True - error_msg = ("verification of brick status as online" - " for gluster vol %s failed" % gluster_vol) - - g.log.error(error_msg) - raise exceptions.ExecutionError(error_msg) - - -def verify_gluster_vol_for_pvc(hostname, pvc_name): - ''' - Verify gluster volume has been created for - the corresponding PVC - Also checks if all the bricks of that gluster - volume are online - Args: - hostname (str): hostname on which we want - to find gluster vol - pvc_name (str): pvc_name for which we - want to find corresponding - gluster vol - Returns: - bool: True if successful - otherwise raise Exception - ''' - verify_pvc_status_is_bound(hostname, pvc_name) - pv_name = get_pv_name_from_pvc(hostname, pvc_name) - vol_dict = get_vol_names_from_pv(hostname, pv_name) - gluster_vol = vol_dict["gluster_vol"] - 
verify_brick_status_online_gluster_vol(hostname, - gluster_vol) - - g.log.info("verification of gluster vol %s for pvc %s is" - "successful" % (gluster_vol, pvc_name)) - return True - - def get_events(hostname, obj_name=None, obj_namespace=None, obj_type=None, event_reason=None, event_type=None): diff --git a/cns-libs/cnslibs/common/podcmd.py b/cns-libs/cnslibs/common/podcmd.py index 0613c206..2673461b 100644 --- a/cns-libs/cnslibs/common/podcmd.py +++ b/cns-libs/cnslibs/common/podcmd.py @@ -51,6 +51,8 @@ import types from glusto.core import Glusto as g +from cnslibs.common import openshift_ops + # Define a namedtuple that allows us to address pods instead of just # hosts, Pod = namedtuple('Pod', 'node podname') @@ -61,11 +63,15 @@ def run(target, command, log_level=None, orig_run=g.run): Wraps glusto's run function. Args: - target (str|Pod): If target is a anything other than a Pod - object the command will be run directly on the target - (hostname or IP). If target is a Pod object it will run - on the named pod, going through the node named in the - Pod object. + target (str|Pod): If target is str object and + it equals to 'auto_get_gluster_endpoint', then + Gluster endpoint gets autocalculated to be any of + Gluster PODs or nodes depending on the deployment type of + a Gluster cluster. + If it is str object with other value, then it is considered to be + an endpoint for command. + If 'target' is of the 'Pod' type, + then command will run on the specified POD. command (str|list): Command to run. 
log_level (str|None): log level to be passed on to glusto's run method @@ -78,6 +84,15 @@ def run(target, command, log_level=None, orig_run=g.run): # NOTE: orig_run captures the glusto run method at function # definition time in order to capture the method before # any additional monkeypatching by other code + + if target == 'auto_get_gluster_endpoint': + ocp_client_node = list(g.config['ocp_servers']['client'].keys())[0] + gluster_pods = openshift_ops.get_ocp_gluster_pod_names(ocp_client_node) + if gluster_pods: + target = Pod(ocp_client_node, gluster_pods[0]) + else: + target = list(g.config.get("gluster_servers", {}).keys())[0] + if isinstance(target, Pod): prefix = ['oc', 'rsh', target.podname] if isinstance(command, types.StringTypes): diff --git a/tests/cns_tests_sample_config.yml b/tests/cns_tests_sample_config.yml index 89f0f162..07ed23eb 100644 --- a/tests/cns_tests_sample_config.yml +++ b/tests/cns_tests_sample_config.yml @@ -41,9 +41,6 @@ cns: cns_project_name: "storage-project" cns_username: "test-admin" cns_password: - - # 'deployment_type' can be cns|crs - deployment_type: 'cns' trusted_storage_pool_list: - [gluster_server1, gluster_server2] heketi_config: diff --git a/tests/functional/common/heketi/heketi_tests/test_disabling_device.py b/tests/functional/common/heketi/heketi_tests/test_disabling_device.py index 43d222a2..5d5e867c 100644 --- a/tests/functional/common/heketi/heketi_tests/test_disabling_device.py +++ b/tests/functional/common/heketi/heketi_tests/test_disabling_device.py @@ -4,7 +4,6 @@ from glustolibs.gluster.volume_ops import get_volume_info from cnslibs.common import exceptions from cnslibs.common import heketi_libs from cnslibs.common import heketi_ops -from cnslibs.common import openshift_ops from cnslibs.common import podcmd @@ -122,12 +121,6 @@ class TestDisableHeketiDevice(heketi_libs.HeketiBaseClass): name = out["name"] # Get gluster volume info - if self.deployment_type == "cns": - gluster_pod = 
openshift_ops.get_ocp_gluster_pod_names( - self.heketi_client_node)[1] - p = podcmd.Pod(self.heketi_client_node, gluster_pod) - out = get_volume_info(p, volname=name) - else: - out = get_volume_info(self.heketi_client_node, volname=name) - self.assertTrue(out, "Failed to get '%s' volume info." % name) + vol_info = get_volume_info('auto_get_gluster_endpoint', volname=name) + self.assertTrue(vol_info, "Failed to get '%s' volume info." % name) g.log.info("Successfully got the '%s' volume info." % name) diff --git a/tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py b/tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py index 7f2a61da..7963413b 100644 --- a/tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py +++ b/tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py @@ -16,7 +16,6 @@ from cnslibs.common.heketi_ops import (heketi_volume_create, heketi_node_info, heketi_node_list, heketi_node_delete) -from cnslibs.common.openshift_ops import get_ocp_gluster_pod_names from cnslibs.common import podcmd @@ -53,24 +52,18 @@ class TestHeketiVolume(HeketiBaseClass): g.log.info("Heketi volumes successfully listed") g.log.info("List gluster volumes") - if self.deployment_type == "cns": - gluster_pod = get_ocp_gluster_pod_names( - self.heketi_client_node)[1] - p = podcmd.Pod(self.heketi_client_node, gluster_pod) - out = get_volume_list(p) - else: - out = get_volume_list(self.heketi_client_node) - self.assertTrue(out, ("Unable to get volumes list")) + vol_list = get_volume_list('auto_get_gluster_endpoint') + self.assertTrue(vol_list, ("Unable to get volumes list")) g.log.info("Successfully got the volumes list") # Check the volume count are equal self.assertEqual( - len(volumes["volumes"]), len(out), + len(volumes["volumes"]), len(vol_list), "Lengths of gluster '%s' and heketi '%s' volume lists are " - "not equal." % (out, volumes) + "not equal." 
% (vol_list, volumes) ) g.log.info("Heketi volumes list %s and" - " gluster volumes list %s" % ((volumes), (out))) + " gluster volumes list %s" % (volumes, vol_list)) @podcmd.GlustoPod() def test_create_vol_and_retrieve_vol_info(self): @@ -97,15 +90,8 @@ class TestHeketiVolume(HeketiBaseClass): g.log.info("Successfully got the heketi volume info") name = out["name"] - if self.deployment_type == "cns": - gluster_pod = get_ocp_gluster_pod_names( - self.heketi_client_node)[1] - p = podcmd.Pod(self.heketi_client_node, gluster_pod) - out = get_volume_info(p, volname=name) - else: - out = get_volume_info(self.heketi_client_node, - volname=name) - self.assertTrue(out, ("Failed to get volume info %s" % name)) + vol_info = get_volume_info('auto_get_gluster_endpoint', volname=name) + self.assertTrue(vol_info, "Failed to get volume info %s" % name) g.log.info("Successfully got the volume info %s" % name) def test_to_check_deletion_of_cluster(self): diff --git a/tests/functional/common/heketi/heketi_tests/test_node_info.py b/tests/functional/common/heketi/heketi_tests/test_node_info.py index 016b3ec4..3f956d62 100644 --- a/tests/functional/common/heketi/heketi_tests/test_node_info.py +++ b/tests/functional/common/heketi/heketi_tests/test_node_info.py @@ -4,7 +4,6 @@ from glustolibs.gluster.peer_ops import get_pool_list from cnslibs.common.heketi_libs import HeketiBaseClass from cnslibs.common import heketi_ops, podcmd -from cnslibs.common.openshift_ops import get_ocp_gluster_pod_names class TestHeketiVolume(HeketiBaseClass): @@ -44,13 +43,7 @@ class TestHeketiVolume(HeketiBaseClass): hostname = [] g.log.info("Get the pool list") - if self.deployment_type == "cns": - gluster_pod = get_ocp_gluster_pod_names( - self.heketi_client_node)[1] - p = podcmd.Pod(self.heketi_client_node, gluster_pod) - list_of_pools = get_pool_list(p) - else: - list_of_pools = get_pool_list(self.heketi_client_node) + list_of_pools = get_pool_list('auto_get_gluster_endpoint') 
self.assertTrue(list_of_pools, ("Failed to get the " "pool list from gluster pods/nodes")) g.log.info("Successfully got the pool list from gluster pods/nodes") diff --git a/tests/functional/common/heketi/test_create_distributed_replica_heketi_volume.py b/tests/functional/common/heketi/test_create_distributed_replica_heketi_volume.py index 3dd4230b..561e1342 100644 --- a/tests/functional/common/heketi/test_create_distributed_replica_heketi_volume.py +++ b/tests/functional/common/heketi/test_create_distributed_replica_heketi_volume.py @@ -14,7 +14,6 @@ from cnslibs.common.heketi_ops import (heketi_node_list, heketi_volume_create, heketi_volume_list, heketi_volume_delete) -from cnslibs.common.openshift_ops import get_ocp_gluster_pod_names from cnslibs.common import podcmd @@ -107,12 +106,8 @@ class TestHeketiVolume(HeketiBaseClass): # Get gluster volume info g.log.info("Get gluster volume '%s' info" % vol_name) - if self.deployment_type == "cns": - gluster_pod = get_ocp_gluster_pod_names(self.master_node)[0] - p = podcmd.Pod(self.master_node, gluster_pod) - gluster_vol = get_volume_info(p, volname=vol_name) - else: - gluster_vol = get_volume_info(self.gluster_node, volname=vol_name) + gluster_vol = get_volume_info( + 'auto_get_gluster_endpoint', volname=vol_name) self.assertTrue( gluster_vol, "Failed to get volume '%s' info" % vol_name) g.log.info("Successfully got volume '%s' info" % vol_name) @@ -157,13 +152,9 @@ class TestHeketiVolume(HeketiBaseClass): # Check the gluster volume list g.log.info("Get the gluster volume list") - if self.deployment_type == "cns": - gluster_pod = get_ocp_gluster_pod_names(self.master_node)[0] - p = podcmd.Pod(self.master_node, gluster_pod) - gluster_volumes = get_volume_list(p) - else: - gluster_volumes = get_volume_list(self.gluster_node) + gluster_volumes = get_volume_list('auto_get_gluster_endpoint') self.assertTrue(gluster_volumes, "Unable to get Gluster volume list") + g.log.info("Successfully got Gluster volume list %s" % 
gluster_volumes) self.assertNotIn(vol_id, gluster_volumes) self.assertNotIn(vol_name, gluster_volumes) diff --git a/tests/functional/common/heketi/test_volume_creation.py b/tests/functional/common/heketi/test_volume_creation.py index 07dba094..b9f2a680 100644 --- a/tests/functional/common/heketi/test_volume_creation.py +++ b/tests/functional/common/heketi/test_volume_creation.py @@ -4,7 +4,6 @@ from glustolibs.gluster import volume_ops from cnslibs.common import exceptions from cnslibs.common import heketi_libs from cnslibs.common import heketi_ops -from cnslibs.common import openshift_ops from cnslibs.common import podcmd @@ -60,25 +59,14 @@ class TestVolumeCreationTestCases(heketi_libs.HeketiBaseClass): "Hosts and gluster servers not matching for %s" % volume_id) - if self.deployment_type == "cns": - gluster_pod = openshift_ops.get_ocp_gluster_pod_names( - self.heketi_client_node)[1] + volume_info = volume_ops.get_volume_info( + 'auto_get_gluster_endpoint', volume_name) + self.assertIsNotNone(volume_info, "get_volume_info returned None") - p = podcmd.Pod(self.heketi_client_node, gluster_pod) - - volume_info = volume_ops.get_volume_info(p, volume_name) - volume_status = volume_ops.get_volume_status(p, volume_name) - - elif self.deployment_type == "crs": - volume_info = volume_ops.get_volume_info( - self.heketi_client_node, volume_name) - volume_status = volume_ops.get_volume_status( - self.heketi_client_node, volume_name) - - self.assertNotEqual(volume_info, None, - "get_volume_info returned None") - self.assertNotEqual(volume_status, None, - "get_volume_status returned None") + volume_status = volume_ops.get_volume_status( + 'auto_get_gluster_endpoint', volume_name) + self.assertIsNotNone( + volume_status, "get_volume_status returned None") self.assertEqual(int(volume_info[volume_name]["status"]), 1, "Volume %s status down" % volume_id) diff --git a/tests/functional/common/heketi/test_volume_expansion_and_devices.py 
b/tests/functional/common/heketi/test_volume_expansion_and_devices.py index 8d590fb6..262e3d6b 100644 --- a/tests/functional/common/heketi/test_volume_expansion_and_devices.py +++ b/tests/functional/common/heketi/test_volume_expansion_and_devices.py @@ -8,7 +8,6 @@ from glustolibs.gluster import volume_ops, rebalance_ops from cnslibs.common.exceptions import ExecutionError from cnslibs.common.heketi_libs import HeketiBaseClass -from cnslibs.common.openshift_ops import get_ocp_gluster_pod_names from cnslibs.common import heketi_ops, podcmd @@ -19,101 +18,52 @@ class TestVolumeExpansionAndDevicesTestCases(HeketiBaseClass): @podcmd.GlustoPod() def get_num_of_bricks(self, volume_name): - """ - Method to determine number of - bricks at present in the volume - """ - brick_info = [] - - if self.deployment_type == "cns": - - gluster_pod = get_ocp_gluster_pod_names( - self.heketi_client_node)[1] - - p = podcmd.Pod(self.heketi_client_node, gluster_pod) - - volume_info_before_expansion = volume_ops.get_volume_info( - p, volume_name) - - elif self.deployment_type == "crs": - volume_info_before_expansion = volume_ops.get_volume_info( - self.heketi_client_node, volume_name) + """Method to determine number of bricks at present in the volume.""" + volume_info = volume_ops.get_volume_info( + 'auto_get_gluster_endpoint', volume_name) self.assertIsNotNone( - volume_info_before_expansion, - "Volume info is None") + volume_info, "'%s' volume info is None" % volume_name) - for brick_details in (volume_info_before_expansion - [volume_name]["bricks"]["brick"]): - - brick_info.append(brick_details["name"]) - - num_of_bricks = len(brick_info) - - return num_of_bricks + return len([b for b in volume_info[volume_name]["bricks"]["brick"]]) @podcmd.GlustoPod() def get_rebalance_status(self, volume_name): - """ - Rebalance status after expansion - """ - if self.deployment_type == "cns": - gluster_pod = get_ocp_gluster_pod_names( - self.heketi_client_node)[1] - - p = 
podcmd.Pod(self.heketi_client_node, gluster_pod) - - wait_reb = rebalance_ops.wait_for_rebalance_to_complete( - p, volume_name) - self.assertTrue(wait_reb, "Rebalance not complete") - - reb_status = rebalance_ops.get_rebalance_status( - p, volume_name) - - elif self.deployment_type == "crs": - wait_reb = rebalance_ops.wait_for_rebalance_to_complete( - self.heketi_client_node, volume_name) - self.assertTrue(wait_reb, "Rebalance not complete") - - reb_status = rebalance_ops.get_rebalance_status( - self.heketi_client_node, volume_name) + """Rebalance status after expansion.""" + wait_reb = rebalance_ops.wait_for_rebalance_to_complete( + 'auto_get_gluster_endpoint', volume_name) + self.assertTrue( + wait_reb, + "Rebalance for '%s' volume was not completed." % volume_name) - self.assertEqual(reb_status["aggregate"]["statusStr"], - "completed", "Rebalance not yet completed") + reb_status = rebalance_ops.get_rebalance_status( + 'auto_get_gluster_endpoint', volume_name) + self.assertEqual( + reb_status["aggregate"]["statusStr"], "completed", + "Failed to get rebalance status for '%s' volume." 
% volume_name) @podcmd.GlustoPod() def get_brick_and_volume_status(self, volume_name): - """ - Status of each brick in a volume - for background validation - """ - brick_info = [] - - if self.deployment_type == "cns": - gluster_pod = get_ocp_gluster_pod_names( - self.heketi_client_node)[1] + """Status of each brick in a volume for background validation.""" - p = podcmd.Pod(self.heketi_client_node, gluster_pod) - - volume_info = volume_ops.get_volume_info(p, volume_name) - volume_status = volume_ops.get_volume_status(p, volume_name) - - elif self.deployment_type == "crs": - volume_info = volume_ops.get_volume_info( - self.heketi_client_node, volume_name) - volume_status = volume_ops.get_volume_status( - self.heketi_client_node, volume_name) + volume_info = volume_ops.get_volume_info( + 'auto_get_gluster_endpoint', volume_name) + self.assertIsNotNone( + volume_info, "'%s' volume info is empty" % volume_name) - self.assertIsNotNone(volume_info, "Volume info is empty") - self.assertIsNotNone(volume_status, "Volume status is empty") + volume_status = volume_ops.get_volume_status( + 'auto_get_gluster_endpoint', volume_name) + self.assertIsNotNone( + volume_status, "'%s' volume status is empty" % volume_name) self.assertEqual(int(volume_info[volume_name]["status"]), 1, "Volume not up") + + brick_info = [] for brick_details in volume_info[volume_name]["bricks"]["brick"]: brick_info.append(brick_details["name"]) - - if brick_info == []: - raise ExecutionError("Brick details empty for %s" % volume_name) + self.assertTrue( + brick_info, "Brick details are empty for %s" % volume_name) for brick in brick_info: brick_data = brick.strip().split(":") |