From 7f9209c7d49b1c20cdb6a7626a95172d6df85be2 Mon Sep 17 00:00:00 2001
From: Pranav
Date: Tue, 28 Jul 2020 13:48:30 +0530
Subject: [Libfix] Move NFS Ganesha support to GlusterBaseClass

Problem:
NFS-Ganesha tests inherit 'NfsGaneshaClusterSetupClass', whereas the other
tests inherit 'GlusterBaseClass'. This causes a cyclic dependency when
trying to run other modules with NFS-Ganesha.

Fix:
1. Move the NFS-Ganesha dependencies to GlusterBaseClass
2. Modify the NFS-Ganesha tests to inherit from GlusterBaseClass
3. Remove the setup_nfs_ganesha method call from existing Ganesha tests,
   as it is invoked by default from GlusterBaseClass.setUpClass

Change-Id: I1e382fdb2b29585c097dfd0fea0b45edafb6442b
Signed-off-by: Pranav
---
 .../glustolibs/gluster/gluster_base_class.py |  46 +++-
 .../glustolibs/gluster/nfs_ganesha_libs.py   | 259 ++++++++-------------
 2 files changed, 143 insertions(+), 162 deletions(-)
 mode change 100644 => 100755 glustolibs-gluster/glustolibs/gluster/gluster_base_class.py
 mode change 100644 => 100755 glustolibs-gluster/glustolibs/gluster/nfs_ganesha_libs.py

diff --git a/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py b/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py
old mode 100644
new mode 100755
index 0acfa59ed..b43318fe4
--- a/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py
+++ b/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py
@@ -60,6 +60,8 @@ from glustolibs.gluster.volume_ops import (
     set_volume_options, volume_reset, volume_start)
 from glustolibs.io.utils import log_mounts_info
 from glustolibs.gluster.geo_rep_libs import setup_master_and_slave_volumes
+from glustolibs.gluster.nfs_ganesha_ops import (
+    teardown_nfs_ganesha_cluster)
 from glustolibs.misc.misc_libs import kill_process
 
@@ -279,7 +281,7 @@ class GlusterBaseClass(TestCase):
                                    process_ids=out.strip().split('\n'))
                 if not ret:
                     g.log.error("Unable to kill process {}".format(
-                        out.strip().split('\n')))
+                        out.strip().split('\n')))
                     return False
             if not shared_storage_mounted:
                 cmd_list = (
@@ -986,8 +988,8 @@ class GlusterBaseClass(TestCase):
             mount_dict['volname'] = cls.slave_volume
             mount_dict['server'] = cls.mnode_slave
             mount_dict['mountpoint'] = path_join(
-                "/mnt", '_'.join([cls.slave_volname,
-                                  cls.mount_type]))
+                "/mnt", '_'.join([cls.slave_volname,
+                                  cls.mount_type]))
         cls.slave_mounts = create_mount_objs(slave_mount_dict_list)
 
         # Defining clients from mounts.
@@ -1027,6 +1029,30 @@ class GlusterBaseClass(TestCase):
                 datetime.now().strftime('%H_%M_%d_%m_%Y'))
         cls.glustotest_run_id = g.config['glustotest_run_id']
 
+        if cls.enable_nfs_ganesha:
+            g.log.info("Setup NFS_Ganesha")
+            cls.num_of_nfs_ganesha_nodes = int(cls.num_of_nfs_ganesha_nodes)
+            cls.servers_in_nfs_ganesha_cluster = (
+                cls.servers[:cls.num_of_nfs_ganesha_nodes])
+            cls.vips_in_nfs_ganesha_cluster = (
+                cls.vips[:cls.num_of_nfs_ganesha_nodes])
+
+            # Obtain hostname of servers in ganesha cluster
+            cls.ganesha_servers_hostname = []
+            for ganesha_server in cls.servers_in_nfs_ganesha_cluster:
+                ret, hostname, _ = g.run(ganesha_server, "hostname")
+                if ret:
+                    raise ExecutionError("Failed to obtain hostname of %s"
+                                         % ganesha_server)
+                hostname = hostname.strip()
+                g.log.info("Obtained hostname: IP- %s, hostname- %s",
+                           ganesha_server, hostname)
+                cls.ganesha_servers_hostname.append(hostname)
+            from glustolibs.gluster.nfs_ganesha_libs import setup_nfs_ganesha
+            ret = setup_nfs_ganesha(cls)
+            if not ret:
+                raise ExecutionError("Failed to setup nfs ganesha")
+
         msg = "Setupclass: %s : %s" % (cls.__name__, cls.glustotest_run_id)
         g.log.info(msg)
         cls.inject_msg_in_gluster_logs(msg)
@@ -1065,3 +1091,17 @@ class GlusterBaseClass(TestCase):
             GlusterBaseClass.error_or_failure_exists)
         g.log.info(ret)
         return cls.get_super_method(cls, 'doClassCleanups')()
+
+    @classmethod
+    def delete_nfs_ganesha_cluster(cls):
+        ret = teardown_nfs_ganesha_cluster(
+            cls.servers_in_nfs_ganesha_cluster)
+        if not ret:
+            g.log.error("Teardown got failed. Hence, cleaning up "
+                        "nfs-ganesha cluster forcefully")
+            ret = teardown_nfs_ganesha_cluster(
+                cls.servers_in_nfs_ganesha_cluster, force=True)
+            if not ret:
+                raise ExecutionError("Force cleanup of nfs-ganesha "
+                                     "cluster failed")
+        g.log.info("Teardown nfs ganesha cluster succeeded")
diff --git a/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_libs.py b/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_libs.py
old mode 100644
new mode 100755
index 92d22a8a4..cda79a5e6
--- a/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_libs.py
+++ b/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_libs.py
@@ -32,180 +32,121 @@ from glustolibs.gluster.nfs_ganesha_ops import (
     create_nfs_ganesha_cluster,
     configure_ports_on_clients,
     ganesha_client_firewall_settings)
-from glustolibs.gluster.gluster_base_class import GlusterBaseClass
-from glustolibs.gluster.exceptions import ExecutionError, ConfigError
 from glustolibs.gluster.volume_libs import is_volume_exported
 
 
-class NfsGaneshaClusterSetupClass(GlusterBaseClass):
-    """Creates nfs ganesha cluster
+def setup_nfs_ganesha(cls):
     """
-    @classmethod
-    def setUpClass(cls):
-        """
-        Setup variable for nfs-ganesha tests.
- """ - # pylint: disable=too-many-statements, too-many-branches - super(NfsGaneshaClusterSetupClass, cls).setUpClass() - - # Check if enable_nfs_ganesha is set in config file - if not cls.enable_nfs_ganesha: - raise ConfigError("Please enable nfs ganesha in config") - - # Read num_of_nfs_ganesha_nodes from config file and create - # nfs ganesha cluster accordingly - cls.num_of_nfs_ganesha_nodes = int(cls.num_of_nfs_ganesha_nodes) - cls.servers_in_nfs_ganesha_cluster = ( - cls.servers[:cls.num_of_nfs_ganesha_nodes]) - cls.vips_in_nfs_ganesha_cluster = ( - cls.vips[:cls.num_of_nfs_ganesha_nodes]) - - # Obtain hostname of servers in ganesha cluster - cls.ganesha_servers_hostname = [] - for ganesha_server in cls.servers_in_nfs_ganesha_cluster: - ret, hostname, _ = g.run(ganesha_server, "hostname") - if ret: - raise ExecutionError("Failed to obtain hostname of %s" - % ganesha_server) - hostname = hostname.strip() - g.log.info("Obtained hostname: IP- %s, hostname- %s", - ganesha_server, hostname) - cls.ganesha_servers_hostname.append(hostname) - - @classmethod - def setup_nfs_ganesha(cls): - """ - Create nfs-ganesha cluster if not exists - Set client configurations for nfs-ganesha - - Returns: - True(bool): If setup is successful - False(bool): If setup is failure - """ - # pylint: disable = too-many-statements, too-many-branches - # pylint: disable = too-many-return-statements - cluster_exists = is_nfs_ganesha_cluster_exists( + Create nfs-ganesha cluster if not exists + Set client configurations for nfs-ganesha + + Returns: + True(bool): If setup is successful + False(bool): If setup is failure + """ + # pylint: disable = too-many-statements, too-many-branches + # pylint: disable = too-many-return-statements + cluster_exists = is_nfs_ganesha_cluster_exists( + cls.servers_in_nfs_ganesha_cluster[0]) + if cluster_exists: + is_healthy = is_nfs_ganesha_cluster_in_healthy_state( cls.servers_in_nfs_ganesha_cluster[0]) - if cluster_exists: - is_healthy = is_nfs_ganesha_cluster_in_healthy_state( - cls.servers_in_nfs_ganesha_cluster[0]) - - if is_healthy: - g.log.info("Nfs-ganesha Cluster exists and is in healthy " - "state. 
Skipping cluster creation...") - else: - g.log.info("Nfs-ganesha Cluster exists and is not in " - "healthy state.") - g.log.info("Tearing down existing cluster which is not in " - "healthy state") - ganesha_ha_file = ("/var/run/gluster/shared_storage/" - "nfs-ganesha/ganesha-ha.conf") - g_node = cls.servers_in_nfs_ganesha_cluster[0] - - g.log.info("Collecting server details of existing " - "nfs ganesha cluster") - - # Check whether ganesha ha file exists - cmd = "[ -f {} ]".format(ganesha_ha_file) - ret, _, _ = g.run(g_node, cmd) - if ret: - g.log.error("Unable to locate %s", ganesha_ha_file) - return False - - # Read contents of ganesha_ha_file - cmd = "cat {}".format(ganesha_ha_file) - ret, ganesha_ha_contents, _ = g.run(g_node, cmd) - if ret: - g.log.error("Failed to read %s", ganesha_ha_file) - return False - - servers_in_existing_cluster = re.findall(r'VIP_(.*)\=.*', - ganesha_ha_contents) - - ret = teardown_nfs_ganesha_cluster( - servers_in_existing_cluster, force=True) - if not ret: - g.log.error("Failed to teardown unhealthy ganesha " - "cluster") - return False - - g.log.info("Existing unhealthy cluster got teardown " - "successfully") - - if (not cluster_exists) or (not is_healthy): - g.log.info("Creating nfs-ganesha cluster of %s nodes" - % str(cls.num_of_nfs_ganesha_nodes)) - g.log.info("Nfs-ganesha cluster node info: %s" - % cls.servers_in_nfs_ganesha_cluster) - g.log.info("Nfs-ganesha cluster vip info: %s" - % cls.vips_in_nfs_ganesha_cluster) - - ret = create_nfs_ganesha_cluster( - cls.ganesha_servers_hostname, - cls.vips_in_nfs_ganesha_cluster) + + if is_healthy: + g.log.info("Nfs-ganesha Cluster exists and is in healthy " + "state. Skipping cluster creation...") + else: + g.log.info("Nfs-ganesha Cluster exists and is not in " + "healthy state.") + g.log.info("Tearing down existing cluster which is not in " + "healthy state") + ganesha_ha_file = ("/var/run/gluster/shared_storage/" + "nfs-ganesha/ganesha-ha.conf") + g_node = cls.servers_in_nfs_ganesha_cluster[0] + + g.log.info("Collecting server details of existing " + "nfs ganesha cluster") + + # Check whether ganesha ha file exists + cmd = "[ -f {} ]".format(ganesha_ha_file) + ret, _, _ = g.run(g_node, cmd) + if ret: + g.log.error("Unable to locate %s", ganesha_ha_file) + return False + + # Read contents of ganesha_ha_file + cmd = "cat {}".format(ganesha_ha_file) + ret, ganesha_ha_contents, _ = g.run(g_node, cmd) + if ret: + g.log.error("Failed to read %s", ganesha_ha_file) + return False + + servers_in_existing_cluster = re.findall(r'VIP_(.*)\=.*', + ganesha_ha_contents) + + ret = teardown_nfs_ganesha_cluster( + servers_in_existing_cluster, force=True) if not ret: - g.log.error("Creation of nfs-ganesha cluster failed") + g.log.error("Failed to teardown unhealthy ganesha " + "cluster") return False - if not is_nfs_ganesha_cluster_in_healthy_state( - cls.servers_in_nfs_ganesha_cluster[0]): - g.log.error("Nfs-ganesha cluster is not healthy") - return False - g.log.info("Nfs-ganesha Cluster exists is in healthy state") + g.log.info("Existing unhealthy cluster got teardown " + "successfully") - ret = configure_ports_on_clients(cls.clients) - if not ret: - g.log.error("Failed to configure ports on clients") - return False + if (not cluster_exists) or (not is_healthy): + g.log.info("Creating nfs-ganesha cluster of %s nodes" + % str(cls.num_of_nfs_ganesha_nodes)) + g.log.info("Nfs-ganesha cluster node info: %s" + % cls.servers_in_nfs_ganesha_cluster) + g.log.info("Nfs-ganesha cluster vip info: %s" + % 
 
-        ret = ganesha_client_firewall_settings(cls.clients)
+        ret = create_nfs_ganesha_cluster(
+            cls.ganesha_servers_hostname,
+            cls.vips_in_nfs_ganesha_cluster)
         if not ret:
-            g.log.error("Failed to do firewall setting in clients")
+            g.log.error("Creation of nfs-ganesha cluster failed")
             return False
 
-        for server in cls.servers:
-            for client in cls.clients:
-                cmd = ("if [ -z \"$(grep -R \"%s\" /etc/hosts)\" ]; then "
-                       "echo \"%s %s\" >> /etc/hosts; fi"
-                       % (client, socket.gethostbyname(client), client))
-                ret, _, _ = g.run(server, cmd)
-                if ret != 0:
-                    g.log.error("Failed to add entry of client %s in "
-                                "/etc/hosts of server %s"
-                                % (client, server))
+    if not is_nfs_ganesha_cluster_in_healthy_state(
+            cls.servers_in_nfs_ganesha_cluster[0]):
+        g.log.error("Nfs-ganesha cluster is not healthy")
+        return False
+    g.log.info("Nfs-ganesha Cluster exists is in healthy state")
 
-        for client in cls.clients:
-            for server in cls.servers:
-                cmd = ("if [ -z \"$(grep -R \"%s\" /etc/hosts)\" ]; then "
-                       "echo \"%s %s\" >> /etc/hosts; fi"
-                       % (server, socket.gethostbyname(server), server))
-                ret, _, _ = g.run(client, cmd)
-                if ret != 0:
-                    g.log.error("Failed to add entry of server %s in "
-                                "/etc/hosts of client %s"
-                                % (server, client))
-        return True
+    ret = configure_ports_on_clients(cls.clients)
+    if not ret:
+        g.log.error("Failed to configure ports on clients")
+        return False
 
-    @classmethod
-    def tearDownClass(cls, delete_nfs_ganesha_cluster=True):
-        """Teardown nfs ganesha cluster.
-        """
-        super(NfsGaneshaClusterSetupClass, cls).tearDownClass()
+    ret = ganesha_client_firewall_settings(cls.clients)
+    if not ret:
+        g.log.error("Failed to do firewall setting in clients")
+        return False
 
-        if delete_nfs_ganesha_cluster:
-            ret = teardown_nfs_ganesha_cluster(
-                cls.servers_in_nfs_ganesha_cluster)
-            if not ret:
-                g.log.error("Teardown got failed. Hence, cleaning up "
-                            "nfs-ganesha cluster forcefully")
-                ret = teardown_nfs_ganesha_cluster(
-                    cls.servers_in_nfs_ganesha_cluster, force=True)
-                if not ret:
-                    raise ExecutionError("Force cleanup of nfs-ganesha "
-                                         "cluster failed")
-            g.log.info("Teardown nfs ganesha cluster succeeded")
-        else:
-            g.log.info("Skipping teardown nfs-ganesha cluster...")
+    for server in cls.servers:
+        for client in cls.clients:
+            cmd = ("if [ -z \"$(grep -R \"%s\" /etc/hosts)\" ]; then "
+                   "echo \"%s %s\" >> /etc/hosts; fi"
+                   % (client, socket.gethostbyname(client), client))
+            ret, _, _ = g.run(server, cmd)
+            if ret != 0:
+                g.log.error("Failed to add entry of client %s in "
+                            "/etc/hosts of server %s"
+                            % (client, server))
 
+    for client in cls.clients:
+        for server in cls.servers:
+            cmd = ("if [ -z \"$(grep -R \"%s\" /etc/hosts)\" ]; then "
+                   "echo \"%s %s\" >> /etc/hosts; fi"
+                   % (server, socket.gethostbyname(server), server))
+            ret, _, _ = g.run(client, cmd)
+            if ret != 0:
+                g.log.error("Failed to add entry of server %s in "
+                            "/etc/hosts of client %s"
+                            % (server, client))
+    return True
+
 
 def wait_for_nfs_ganesha_volume_to_get_exported(mnode, volname, timeout=120):
--
cgit
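
Example (illustrative, not part of the patch): after this change a Ganesha test module inherits GlusterBaseClass directly and no longer calls setup_nfs_ganesha() itself; the cluster is created from GlusterBaseClass.setUpClass() when enable_nfs_ganesha is set in the test configuration. A minimal sketch of such a test follows; the class name, volume/mount types and the volume setup helpers used here are illustrative assumptions, not taken from this patch.

    # Hypothetical test module sketch -- names below are illustrative only.
    from glusto.core import Glusto as g

    from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
    from glustolibs.gluster.exceptions import ExecutionError


    @runs_on([['replicated', 'distributed-replicated'], ['nfs']])
    class TestGaneshaExample(GlusterBaseClass):
        """Relies on the NFS-Ganesha cluster created by
        GlusterBaseClass.setUpClass; no explicit setup_nfs_ganesha call."""

        def setUp(self):
            # Run GlusterBaseClass.setUp; the Ganesha cluster already
            # exists at this point.
            self.get_super_method(self, 'setUp')()
            if not self.setup_volume_and_mount_volume(mounts=self.mounts):
                raise ExecutionError("Failed to setup and mount volume")

        def test_volume_is_usable_over_ganesha(self):
            # A real test would exercise I/O against self.mounts here.
            g.log.info("Volume %s is mounted via NFS-Ganesha", self.volname)

        def tearDown(self):
            if not self.unmount_volume_and_cleanup_volume(mounts=self.mounts):
                raise ExecutionError("Failed to unmount and cleanup volume")
            self.get_super_method(self, 'tearDown')()

Note that the new GlusterBaseClass.delete_nfs_ganesha_cluster() classmethod is not invoked automatically by this patch, so the cluster stays up for subsequent modules unless a test module calls that method from its own tearDownClass.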