author     Jilju Joy <jijoy@redhat.com>  2018-12-17 16:27:47 +0530
committer  Akarsha Rai <akrai@redhat.com>  2019-04-15 06:46:19 +0000
commit     d76641e56a1cc6edd710675486a492d958a9697a (patch)
tree       b5c0332ae3c991d74abf14b90dafeeecc6d7f5de /glustolibs-gluster/glustolibs
parent     49fa677ae7978b8e7c3c0c02e1e3b6b15406fd08 (diff)
Avoid using gdeploy for nfs-ganesha setup, teardown, acl and root-squash libs
* Removed dependency on gdeploy for setup and teardown of ganesha cluster
* Correction of pylint errors
* Added new method to set acl
* Added new method to set root-squash
* Added new method for refresh-config
* Removed setUp and tearDown methods from NfsGaneshaClusterSetupClass

Change-Id: I1266fe7c09e1fed148ca222712e15932a5ad928c
Diffstat (limited to 'glustolibs-gluster/glustolibs')
-rw-r--r--  glustolibs-gluster/glustolibs/gluster/nfs_ganesha_libs.py | 173
-rw-r--r--  glustolibs-gluster/glustolibs/gluster/nfs_ganesha_ops.py  | 736
2 files changed, 589 insertions, 320 deletions
diff --git a/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_libs.py b/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_libs.py
index 863ba40d7..20dbe430d 100644
--- a/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_libs.py
+++ b/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_libs.py
@@ -21,15 +21,19 @@
management node.
"""
+import time
+import socket
+import re
from glusto.core import Glusto as g
from glustolibs.gluster.nfs_ganesha_ops import (
- is_nfs_ganesha_cluster_exists,
- is_nfs_ganesha_cluster_in_healthy_state,
- teardown_nfs_ganesha_cluster,
- create_nfs_ganesha_cluster,
- export_nfs_ganesha_volume,
- unexport_nfs_ganesha_volume,
- set_nfs_ganesha_client_configuration)
+ is_nfs_ganesha_cluster_exists,
+ is_nfs_ganesha_cluster_in_healthy_state,
+ teardown_nfs_ganesha_cluster,
+ create_nfs_ganesha_cluster,
+ export_nfs_ganesha_volume,
+ unexport_nfs_ganesha_volume,
+ configure_ports_on_clients,
+ ganesha_client_firewall_settings)
from glustolibs.gluster.gluster_base_class import GlusterBaseClass
from glustolibs.gluster.exceptions import ExecutionError, ConfigError
from glustolibs.gluster.peer_ops import peer_probe_servers, peer_status
@@ -41,9 +45,6 @@ from glustolibs.gluster.volume_libs import (setup_volume, cleanup_volume,
from glustolibs.gluster.mount_ops import create_mount_objs
from glustolibs.io.utils import log_mounts_info, wait_for_io_to_complete
from glustolibs.misc.misc_libs import upload_scripts
-import time
-import socket
-import re
class NfsGaneshaClusterSetupClass(GlusterBaseClass):
@@ -51,15 +52,10 @@ class NfsGaneshaClusterSetupClass(GlusterBaseClass):
"""
@classmethod
def setUpClass(cls):
- """Setup nfs-ganesha cluster
- tests.
"""
-
- # Check if gdeploy is installed on glusto-tests management node.
- ret, _, _ = g.run_local("gdeploy --version")
- if ret != 0:
- raise ConfigError("Please install gdeploy to run the scripts")
-
+ Setup variables for nfs-ganesha tests.
+ """
+ # pylint: disable=too-many-statements, too-many-branches
GlusterBaseClass.setUpClass.im_func(cls)
# Check if enable_nfs_ganesha is set in config file
@@ -74,11 +70,37 @@ class NfsGaneshaClusterSetupClass(GlusterBaseClass):
cls.vips_in_nfs_ganesha_cluster = (
cls.vips[:cls.num_of_nfs_ganesha_nodes])
- # Create nfs ganesha cluster if not exists already
- if (is_nfs_ganesha_cluster_exists(
- cls.servers_in_nfs_ganesha_cluster[0])):
- if is_nfs_ganesha_cluster_in_healthy_state(
- cls.servers_in_nfs_ganesha_cluster[0]):
+ # Obtain hostname of servers in ganesha cluster
+ cls.ganesha_servers_hostname = []
+ for ganesha_server in cls.servers_in_nfs_ganesha_cluster:
+ ret, hostname, _ = g.run(ganesha_server, "hostname")
+ if ret:
+ raise ExecutionError("Failed to obtain hostname of %s"
+ % ganesha_server)
+ hostname = hostname.strip()
+ g.log.info("Obtained hostname: IP- %s, hostname- %s",
+ ganesha_server, hostname)
+ cls.ganesha_servers_hostname.append(hostname)
+
+ @classmethod
+ def setup_nfs_ganesha(cls):
+ """
+ Create nfs-ganesha cluster if it does not already exist
+ Set client configurations for nfs-ganesha
+
+ Returns:
+ True(bool): If setup is successful
+ False(bool): If setup fails
+ """
+ # pylint: disable = too-many-statements, too-many-branches
+ # pylint: disable = too-many-return-statements
+ cluster_exists = is_nfs_ganesha_cluster_exists(
+ cls.servers_in_nfs_ganesha_cluster[0])
+ if cluster_exists:
+ is_healthy = is_nfs_ganesha_cluster_in_healthy_state(
+ cls.servers_in_nfs_ganesha_cluster[0])
+
+ if is_healthy:
g.log.info("Nfs-ganesha Cluster exists and is in healthy "
"state. Skipping cluster creation...")
else:
@@ -93,16 +115,17 @@ class NfsGaneshaClusterSetupClass(GlusterBaseClass):
"nfs ganesha cluster")
conn = g.rpyc_get_connection(
cls.servers_in_nfs_ganesha_cluster[0], user="root")
- if conn is None:
+ if not conn:
tmp_node = cls.servers_in_nfs_ganesha_cluster[0]
- raise ExecutionError("Unable to get connection to 'root' "
- " of node %s "
- % tmp_node)
+ g.log.error("Unable to get connection to 'root' of node"
+ " %s", tmp_node)
+ return False
+
if not conn.modules.os.path.exists(ganesha_ha_file):
- raise ExecutionError("Unable to locate %s"
- % ganesha_ha_file)
- with conn.builtin.open(ganesha_ha_file, "r") as fh:
- ganesha_ha_contents = fh.read()
+ g.log.error("Unable to locate %s", ganesha_ha_file)
+ return False
+ with conn.builtin.open(ganesha_ha_file, "r") as fhand:
+ ganesha_ha_contents = fhand.read()
g.rpyc_close_connection(
host=cls.servers_in_nfs_ganesha_cluster[0], user="root")
servers_in_existing_cluster = re.findall(r'VIP_(.*)\=.*',
@@ -111,45 +134,43 @@ class NfsGaneshaClusterSetupClass(GlusterBaseClass):
ret = teardown_nfs_ganesha_cluster(
servers_in_existing_cluster, force=True)
if not ret:
- raise ExecutionError("Failed to teardown nfs "
- "ganesha cluster")
- g.log.info("Existing cluster got teardown successfully")
- g.log.info("Creating nfs-ganesha cluster of %s nodes"
- % str(cls.num_of_nfs_ganesha_nodes))
- g.log.info("Nfs-ganesha cluster node info: %s"
- % cls.servers_in_nfs_ganesha_cluster)
- g.log.info("Nfs-ganesha cluster vip info: %s"
- % cls.vips_in_nfs_ganesha_cluster)
- ret = create_nfs_ganesha_cluster(
- cls.servers_in_nfs_ganesha_cluster,
- cls.vips_in_nfs_ganesha_cluster)
- if not ret:
- raise ExecutionError("Failed to create "
- "nfs-ganesha cluster")
- else:
+ g.log.error("Failed to teardown unhealthy ganesha "
+ "cluster")
+ return False
+
+ g.log.info("Existing unhealthy cluster got teardown "
+ "successfully")
+
+ if (not cluster_exists) or (not is_healthy):
g.log.info("Creating nfs-ganesha cluster of %s nodes"
% str(cls.num_of_nfs_ganesha_nodes))
g.log.info("Nfs-ganesha cluster node info: %s"
% cls.servers_in_nfs_ganesha_cluster)
g.log.info("Nfs-ganesha cluster vip info: %s"
% cls.vips_in_nfs_ganesha_cluster)
+
ret = create_nfs_ganesha_cluster(
- cls.servers_in_nfs_ganesha_cluster,
+ cls.ganesha_servers_hostname,
cls.vips_in_nfs_ganesha_cluster)
if not ret:
- raise ExecutionError("Failed to create "
- "nfs-ganesha cluster")
+ g.log.error("Creation of nfs-ganesha cluster failed")
+ return False
- if is_nfs_ganesha_cluster_in_healthy_state(
- cls.servers_in_nfs_ganesha_cluster[0]):
- g.log.info("Nfs-ganesha Cluster exists is in healthy state")
- else:
- raise ExecutionError("Nfs-ganesha Cluster setup Failed")
+ if not is_nfs_ganesha_cluster_in_healthy_state(
+ cls.servers_in_nfs_ganesha_cluster[0]):
+ g.log.error("Nfs-ganesha cluster is not healthy")
+ return False
+ g.log.info("Nfs-ganesha Cluster exists is in healthy state")
+
+ ret = configure_ports_on_clients(cls.clients)
+ if not ret:
+ g.log.error("Failed to configure ports on clients")
+ return False
- ret = set_nfs_ganesha_client_configuration(cls.clients)
+ ret = ganesha_client_firewall_settings(cls.clients)
if not ret:
- raise ExecutionError("Failed to do client nfs ganesha "
- "configuration")
+ g.log.error("Failed to do firewall setting in clients")
+ return False
for server in cls.servers:
for client in cls.clients:
@@ -172,16 +193,7 @@ class NfsGaneshaClusterSetupClass(GlusterBaseClass):
g.log.error("Failed to add entry of server %s in "
"/etc/hosts of client %s"
% (server, client))
-
- def setUp(self):
- """setUp required for tests
- """
- GlusterBaseClass.setUp.im_func(self)
-
- def tearDown(self):
- """tearDown required for tests
- """
- GlusterBaseClass.tearDown.im_func(self)
+ return True
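
A minimal sketch of how a test class is expected to consume the new boolean-returning setup_nfs_ganesha() (the test class name is hypothetical; NfsGaneshaClusterSetupClass and ExecutionError come from this library):

    from glustolibs.gluster.exceptions import ExecutionError
    from glustolibs.gluster.nfs_ganesha_libs import NfsGaneshaClusterSetupClass

    class TestGaneshaSanity(NfsGaneshaClusterSetupClass):
        @classmethod
        def setUpClass(cls):
            # setUpClass now only gathers config and hostname details;
            # cluster creation moved to setup_nfs_ganesha(), which
            # returns False on failure instead of raising.
            NfsGaneshaClusterSetupClass.setUpClass.im_func(cls)
            if not cls.setup_nfs_ganesha():
                raise ExecutionError("nfs-ganesha cluster setup failed")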
@classmethod
def tearDownClass(cls, delete_nfs_ganesha_cluster=True):
@@ -213,6 +225,7 @@ class NfsGaneshaVolumeBaseClass(NfsGaneshaClusterSetupClass):
"""Setup volume exports volume with nfs-ganesha,
mounts the volume.
"""
+ # pylint: disable=too-many-branches
NfsGaneshaClusterSetupClass.setUpClass.im_func(cls)
# Peer probe servers
@@ -227,12 +240,12 @@ class NfsGaneshaVolumeBaseClass(NfsGaneshaClusterSetupClass):
for server in cls.servers:
mount_info = [
- {'protocol': 'glusterfs',
- 'mountpoint': '/run/gluster/shared_storage',
- 'server': server,
- 'client': {'host': server},
- 'volname': 'gluster_shared_storage',
- 'options': ''}]
+ {'protocol': 'glusterfs',
+ 'mountpoint': '/run/gluster/shared_storage',
+ 'server': server,
+ 'client': {'host': server},
+ 'volname': 'gluster_shared_storage',
+ 'options': ''}]
mount_obj = create_mount_objs(mount_info)
if not mount_obj[0].is_mounted():
@@ -248,7 +261,7 @@ class NfsGaneshaVolumeBaseClass(NfsGaneshaClusterSetupClass):
# Setup Volume
ret = setup_volume(mnode=cls.mnode,
all_servers_info=cls.all_servers_info,
- volume_config=cls.volume, force=True)
+ volume_config=cls.volume)
if not ret:
raise ExecutionError("Setup volume %s failed", cls.volume)
time.sleep(10)
@@ -260,7 +273,7 @@ class NfsGaneshaVolumeBaseClass(NfsGaneshaClusterSetupClass):
raise ExecutionError("Failed to get ganesha.enable volume option "
"for %s " % cls.volume)
if vol_option['ganesha.enable'] != 'on':
- ret, out, err = export_nfs_ganesha_volume(
+ ret, _, _ = export_nfs_ganesha_volume(
mnode=cls.mnode, volname=cls.volname)
if ret != 0:
raise ExecutionError("Failed to export volume %s "
@@ -303,7 +316,7 @@ class NfsGaneshaVolumeBaseClass(NfsGaneshaClusterSetupClass):
teardown_nfs_ganesha_cluster=True):
"""Teardown the export, mounts and volume.
"""
-
+ # pylint: disable=too-many-branches
# Unmount volume
if umount_vol:
_rc = True
@@ -334,7 +347,7 @@ class NfsGaneshaVolumeBaseClass(NfsGaneshaClusterSetupClass):
" option for %s " % cls.volume)
if vol_option['ganesha.enable'] != 'off':
if is_volume_exported(cls.mnode, cls.volname, "nfs"):
- ret, out, err = unexport_nfs_ganesha_volume(
+ ret, _, _ = unexport_nfs_ganesha_volume(
mnode=cls.mnode, volname=cls.volname)
if ret != 0:
raise ExecutionError("Failed to unexport volume %s"
@@ -459,7 +472,7 @@ def wait_for_nfs_ganesha_volume_to_get_exported(mnode, volname, timeout=120):
"""
count = 0
flag = 0
- while (count < timeout):
+ while count < timeout:
if is_volume_exported(mnode, volname, "nfs"):
flag = 1
break
@@ -492,7 +505,7 @@ def wait_for_nfs_ganesha_volume_to_get_unexported(mnode, volname, timeout=120):
"""
count = 0
flag = 0
- while (count < timeout):
+ while count < timeout:
if not is_volume_exported(mnode, volname, "nfs"):
flag = 1
break
diff --git a/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_ops.py b/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_ops.py
index f5892990d..1d48a73ad 100644
--- a/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_ops.py
+++ b/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_ops.py
@@ -15,73 +15,25 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# pylint: disable=too-many-lines
"""
Description: Library for nfs ganesha operations.
Pre-requisite: Please install gdeploy package on the glusto-tests
management node.
"""
-from glusto.core import Glusto as g
import os
+from glusto.core import Glusto as g
+from glustolibs.gluster.glusterdir import mkdir
+from glustolibs.gluster.lib_utils import add_services_to_firewall
+from glustolibs.gluster.shared_storage_ops import enable_shared_storage
GDEPLOY_CONF_DIR = "/usr/share/glustolibs/gdeploy_configs/"
-def create_nfs_ganesha_cluster(servers, vips):
- """Creates nfs ganesha cluster using gdeploy
-
- Args:
- servers (list): Nodes in which nfs-ganesha cluster will be created.
- vips (list): virtual IPs of each servers mentioned in 'servers'
- param.
-
- Returns:
- bool : True on successfully creating nfs-ganesha cluster.
- False otherwise
-
- Example:
- create_nfs_ganesha_cluster(servers, vips)
- """
-
- conf_file = "create_nfs_ganesha_cluster.jinja"
- gdeploy_config_file = GDEPLOY_CONF_DIR + conf_file
- tmp_gdeploy_config_file = ("/tmp/" + os.path.splitext(conf_file)[0] +
- ".conf")
-
- values_to_substitute_in_template = {'servers': servers,
- 'vips': vips}
-
- ret = g.render_template(gdeploy_config_file,
- values_to_substitute_in_template,
- tmp_gdeploy_config_file)
- if not ret:
- g.log.error("Failed to substitute values in %s file"
- % tmp_gdeploy_config_file)
- return False
-
- cmd = "gdeploy -c " + tmp_gdeploy_config_file
- retcode, stdout, stderr = g.run_local(cmd)
- if retcode != 0:
- g.log.error("Failed to execute gdeploy cmd %s for creating nfs "
- "ganesha cluster" % cmd)
- g.log.error("gdeploy console output for creating nfs-ganesha "
- "cluster: %s" % stderr)
-
- return False
-
- g.log.info("gdeploy output for creating nfs-ganesha cluster: %s"
- % stdout)
-
- # pcs status output
- _, _, _ = g.run(servers[0], "pcs status")
-
- # Removing the gdeploy conf file from /tmp
- os.remove(tmp_gdeploy_config_file)
- return True
-
-
def teardown_nfs_ganesha_cluster(servers, force=False):
- """Teardown nfs ganesha cluster using gdeploy
+ """
+ Teardown nfs ganesha cluster
Args:
servers (list): Nodes in nfs-ganesha cluster to teardown entire
@@ -96,46 +48,21 @@ def teardown_nfs_ganesha_cluster(servers, force=False):
Example:
teardown_nfs_ganesha_cluster(servers)
"""
-
- conf_file = "teardown_nfs_ganesha_cluster.jinja"
- gdeploy_config_file = GDEPLOY_CONF_DIR + conf_file
- tmp_gdeploy_config_file = ("/tmp/" + os.path.splitext(conf_file)[0] +
- ".conf")
-
- values_to_substitute_in_template = {'servers': servers}
-
- ret = g.render_template(gdeploy_config_file,
- values_to_substitute_in_template,
- tmp_gdeploy_config_file)
- if not ret:
- g.log.error("Failed to substitute values in %s file"
- % tmp_gdeploy_config_file)
- return False
-
- cmd = "gdeploy -c " + tmp_gdeploy_config_file
- retcode, stdout, stderr = g.run_local(cmd)
- if retcode != 0:
- g.log.error("Failed to execute gdeploy cmd %s for teardown nfs "
- "ganesha cluster" % cmd)
- g.log.error("gdeploy console output for teardown nfs-ganesha "
- "cluster: %s" % stderr)
-
- return False
-
- g.log.info("gdeploy output for teardown nfs-ganesha cluster: %s"
- % stdout)
-
- # Removing gdeploy conf file from /tmp
- os.remove(tmp_gdeploy_config_file)
-
if force:
g.log.info("Executing force cleanup...")
for server in servers:
cmd = ("/usr/libexec/ganesha/ganesha-ha.sh --teardown "
"/var/run/gluster/shared_storage/nfs-ganesha")
_, _, _ = g.run(server, cmd)
+ cmd = ("/usr/libexec/ganesha/ganesha-ha.sh --cleanup /var/run/"
+ "gluster/shared_storage/nfs-ganesha")
+ _, _, _ = g.run(server, cmd)
_, _, _ = stop_nfs_ganesha_service(server)
-
+ return True
+ ret, _, _ = disable_nfs_ganesha(servers[0])
+ if ret != 0:
+ g.log.error("Nfs-ganesha disable failed")
+ return False
return True
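
For illustration, a force teardown of a half-configured cluster might look like this sketch (the server list is a placeholder):

    from glustolibs.gluster.nfs_ganesha_ops import teardown_nfs_ganesha_cluster

    servers = ['server1.example.com', 'server2.example.com']  # placeholders
    # force=True runs ganesha-ha.sh --teardown and --cleanup on every node
    # and stops the nfs-ganesha service; without force, ganesha is simply
    # disabled cluster-wide via the first node.
    if not teardown_nfs_ganesha_cluster(servers, force=True):
        raise RuntimeError("Failed to tear down the nfs-ganesha cluster")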
@@ -202,7 +129,7 @@ def delete_node_from_nfs_ganesha_cluster(servers, node_to_delete):
Args:
servers (list): Nodes of existing nfs-ganesha cluster.
- node_to_delete (str): Node to delete from existing nfs-ganesha cluster.
+ node_to_delete (str): Node to delete from existing nfs-ganesha cluster
Returns:
bool : True on successfully deleting the node from nfs-ganesha cluster.
@@ -458,90 +385,6 @@ def update_volume_export_configuration(mnode, volname, config_to_update):
return True
-def enable_root_squash(mnode, volname):
- """
- Enable root squash for the given volume.
-
- Args:
- mnode (str): Node in which cmd command will
- be executed.
- volname (str): volume name
-
- Returns:
- bool : True on successfully enabling root squash on
- nfs-ganesha volume. False otherwise
-
- Example:
- enable_root_squash(mnode, volname)
- """
-
- config_to_update = "Squash=\"Root_squash\";"
- return update_volume_export_configuration(mnode, volname, config_to_update)
-
-
-def disable_root_squash(mnode, volname):
- """
- Disable root squash for the given volume.
-
- Args:
- mnode (str): Node in which cmd command will
- be executed.
- volname (str): volume name
-
- Returns:
- bool : True on successfully disabling root squash on
- nfs-ganesha volume. False otherwise
-
- Example:
- disable_root_squash(mnode, volname)
- """
-
- config_to_update = "Squash=\"No_root_squash\";"
- return update_volume_export_configuration(mnode, volname, config_to_update)
-
-
-def enable_acl(mnode, volname):
- """
- Enable acl for the given volume.
-
- Args:
- mnode (str): Node in which cmd command will
- be executed.
- volname (str): volume name
-
- Returns:
- bool : True on successfully enabling acl on
- nfs-ganesha volume. False otherwise
-
- Example:
- enable_acl(mnode, volname)
- """
-
- config_to_update = "Disable_ACL = false;"
- return update_volume_export_configuration(mnode, volname, config_to_update)
-
-
-def disable_acl(mnode, volname):
- """
- Disable acl for the given volume.
-
- Args:
- mnode (str): Node in which cmd command will
- be executed.
- volname (str): volume name
-
- Returns:
- bool : True on successfully disabling acl on
- nfs-ganesha volume. False otherwise
-
- Example:
- disable_acl(mnode, volname)
- """
-
- config_to_update = "Disable_ACL = true;"
- return update_volume_export_configuration(mnode, volname, config_to_update)
-
-
def is_nfs_ganesha_cluster_in_healthy_state(mnode):
"""
Checks whether nfs ganesha cluster is in healthy state.
@@ -562,7 +405,7 @@ def is_nfs_ganesha_cluster_in_healthy_state(mnode):
"/run/gluster/shared_storage/nfs-ganesha/ | grep " +
" 'Cluster HA Status' | cut -d ' ' -f 4 ")
- retcode, stdout, stderr = g.run(mnode, cmd)
+ retcode, stdout, _ = g.run(mnode, cmd)
if retcode != 0:
g.log.error("Failed to execute nfs-ganesha status command to check "
"if cluster is in healthy state")
@@ -578,7 +421,7 @@ def is_nfs_ganesha_cluster_in_healthy_state(mnode):
" 'Online' | grep -v 'Cluster' | cut -d ' ' -f 1 | " +
"sed s/'-cluster_ip-1'//g")
- retcode, stdout, stderr = g.run(mnode, cmd)
+ retcode, stdout, _ = g.run(mnode, cmd)
if retcode != 0:
g.log.error("Failed to execute nfs-ganesha status command to parse "
"for the cluster resources")
@@ -592,7 +435,7 @@ def is_nfs_ganesha_cluster_in_healthy_state(mnode):
" 'Online' | grep -v 'Cluster' | cut -d ' ' -f 1 | " +
"sed s/'-cluster_ip-1'//g")
- retcode, stdout, stderr = g.run(mnode, cmd)
+ retcode, stdout, _ = g.run(mnode, cmd)
if retcode != 0:
g.log.error("Failed to execute nfs-ganesha status command to parse "
"for the hostnames in cluster")
@@ -601,12 +444,12 @@ def is_nfs_ganesha_cluster_in_healthy_state(mnode):
host_list = stdout.split("\n")
host_list = list(filter(None, host_list))
- if ((cluster_list != []) and (cluster_list == host_list)):
+ if (cluster_list != []) and (cluster_list == host_list):
g.log.info("nfs ganesha cluster is in HEALTHY state")
return True
- else:
- g.log.error("nfs ganesha cluster is not in HEALTHY state")
- return False
+
+ g.log.error("nfs ganesha cluster is not in HEALTHY state")
+ return False
def is_nfs_ganesha_cluster_in_failover_state(mnode, failed_nodes):
@@ -631,7 +474,7 @@ def is_nfs_ganesha_cluster_in_failover_state(mnode, failed_nodes):
"/run/gluster/shared_storage/nfs-ganesha/ | grep " +
" 'Cluster HA Status' | cut -d ' ' -f 4 ")
- retcode, stdout, stderr = g.run(mnode, cmd)
+ retcode, stdout, _ = g.run(mnode, cmd)
if retcode != 0:
g.log.error("Failed to execute nfs-ganesha status command to check "
"if cluster is in failover state")
@@ -647,7 +490,7 @@ def is_nfs_ganesha_cluster_in_failover_state(mnode, failed_nodes):
" 'Online' | grep -v 'Cluster' | cut -d ' ' -f 1 | " +
"sed s/'-cluster_ip-1'//g")
- retcode, stdout, stderr = g.run(mnode, cmd)
+ retcode, stdout, _ = g.run(mnode, cmd)
if retcode != 0:
g.log.error("Failed to execute nfs-ganesha status command to parse "
"for the cluster resources")
@@ -661,7 +504,7 @@ def is_nfs_ganesha_cluster_in_failover_state(mnode, failed_nodes):
" 'Online' | grep -v 'Cluster' | cut -d ' ' -f 2 | " +
"sed s/'-cluster_ip-1'//g")
- retcode, stdout, stderr = g.run(mnode, cmd)
+ retcode, stdout, _ = g.run(mnode, cmd)
if retcode != 0:
g.log.error("Failed to execute nfs-ganesha status command to parse "
"for the hostnames in cluster")
@@ -674,13 +517,14 @@ def is_nfs_ganesha_cluster_in_failover_state(mnode, failed_nodes):
for cluster_node, host_node in zip(cluster_list, host_list):
if cluster_node in failed_nodes:
if cluster_node == host_node:
- g.log.error("failover status: failed node %s is not takenover "
- "by other node in nfs-ganesha cluster"
- % (cluster_node))
+ g.log.error("failover status: failed node %s isn't taken over"
+ " by other node in nfs-ganesha cluster" %
+ cluster_node)
ret = False
else:
g.log.info("failover status: failed node %s is successfully "
- "failovered to node %s" % (cluster_node, host_node))
+ "failovered to node %s" %
+ (cluster_node, host_node))
else:
if cluster_node != host_node:
g.log.error("Unexpected. Other nodes are in failover state. "
@@ -710,7 +554,7 @@ def is_nfs_ganesha_cluster_in_bad_state(mnode):
"/run/gluster/shared_storage/nfs-ganesha/ | grep " +
" 'Cluster HA Status' | cut -d ' ' -f 4 ")
- retcode, stdout, stderr = g.run(mnode, cmd)
+ retcode, stdout, _ = g.run(mnode, cmd)
if retcode != 0:
g.log.error("Failed to execute nfs-ganesha status command to check "
"if cluster is in bad state")
@@ -743,7 +587,7 @@ def is_nfs_ganesha_cluster_exists(mnode):
" 'Online' | grep -v 'Cluster' | cut -d ' ' -f 1 | " +
"sed s/'-cluster_ip-1'//g")
- retcode, stdout, stderr = g.run(mnode, cmd)
+ retcode, stdout, _ = g.run(mnode, cmd)
if retcode != 0:
g.log.error("Failed to execute nfs-ganesha status command to parse "
"for the cluster resources")
@@ -755,58 +599,9 @@ def is_nfs_ganesha_cluster_exists(mnode):
if cluster_list != []:
g.log.info("nfs ganesha cluster exists")
return True
- else:
- g.log.error("nfs ganesha cluster not exists")
- return False
-
-
-def set_nfs_ganesha_client_configuration(client_nodes):
- """Sets pre-requisites in the client machines to
- mount with nfs-ganesha.
-
- Args:
- client_nodes (list): Client nodes in which the prerequisite
- are done to do nfs-ganesha mount.
-
- Returns:
- bool : True on successfully creating nfs-ganesha cluster.
- False otherwise
-
- Example:
- set_nfs_ganesha_client_configuration(client_nodes)
- """
-
- conf_file = "nfs_ganesha_client_configuration.jinja"
- gdeploy_config_file = GDEPLOY_CONF_DIR + conf_file
- tmp_gdeploy_config_file = ("/tmp/" + os.path.splitext(conf_file)[0] +
- ".conf")
- values_to_substitute_in_template = {'servers': client_nodes}
-
- ret = g.render_template(gdeploy_config_file,
- values_to_substitute_in_template,
- tmp_gdeploy_config_file)
- if not ret:
- g.log.error("Failed to substitute values in %s file"
- % tmp_gdeploy_config_file)
- return False
-
- cmd = "gdeploy -c " + tmp_gdeploy_config_file
- retcode, stdout, stderr = g.run_local(cmd)
- if retcode != 0:
- g.log.error("Failed to execute gdeploy cmd %s for setting nfs "
- "ganesha client configuration" % cmd)
- g.log.error("gdeploy console output for setting nfs-ganesha "
- "client configuration: %s" % stderr)
-
- return False
-
- g.log.info("gdeploy output for setting nfs-ganesha client "
- "configuration: %s" % stdout)
-
- # Removing the gdeploy conf file from /tmp
- os.remove(tmp_gdeploy_config_file)
- return True
+ g.log.error("nfs ganesha cluster not exists")
+ return False
def stop_nfs_ganesha_service(mnode):
@@ -907,3 +702,464 @@ def start_pacemaker_service(mnode):
cmd = "systemctl start pacemaker"
return g.run(mnode, cmd)
+
+
+def create_nfs_ganesha_cluster(servers, vips):
+ """
+ Creating a ganesha HA cluster
+
+ Args:
+ servers(list): Hostnames of ganesha nodes
+ vips(list): VIPs to be assigned, one per node
+ Returns:
+ True(bool): If configuration of ganesha cluster is successful
+ False(bool): If failed to configure ganesha cluster
+ """
+ # pylint: disable=too-many-return-statements
+ ganesha_mnode = servers[0]
+
+ # Configure ports in ganesha servers
+ g.log.info("Defining statd service ports")
+ ret = configure_ports_on_servers(servers)
+ if not ret:
+ g.log.error("Failed to set statd service ports on nodes.")
+ return False
+
+ # Firewall settings for nfs-ganesha
+ ret = ganesha_server_firewall_settings(servers)
+ if not ret:
+ g.log.error("Firewall settings for nfs ganesha has failed.")
+ return False
+ g.log.info("Firewall settings for nfs ganesha was success.")
+
+ # Enable shared storage if not present
+ ret, _, _ = g.run(ganesha_mnode,
+ "gluster v list | grep 'gluster_shared_storage'")
+ if ret != 0:
+ if not enable_shared_storage(ganesha_mnode):
+ g.log.error("Failed to enable shared storage")
+ return False
+ g.log.info("Enabled gluster shared storage.")
+ else:
+ g.log.info("Shared storage is already enabled.")
+
+ # Enable the glusterfssharedstorage.service and nfs-ganesha service
+ for server in servers:
+ cmd = "systemctl enable glusterfssharedstorage.service"
+ ret, _, _ = g.run(server, cmd)
+ if ret != 0:
+ g.log.error("Failed to enable glusterfssharedstorage.service "
+ "on %s", server)
+ return False
+
+ ret, _, _ = g.run(server, "systemctl enable nfs-ganesha")
+ if ret != 0:
+ g.log.error("Failed to enable nfs-ganesha service on %s", server)
+ return False
+
+ # Password less ssh for nfs
+ ret = create_nfs_passwordless_ssh(ganesha_mnode, servers)
+ if not ret:
+ g.log.error("Password less ssh between nodes failed.")
+ return False
+ g.log.info("Password less ssh between nodes successful.")
+
+ # Create ganesha-ha.conf file
+ tmp_ha_conf = "/tmp/ganesha-ha.conf"
+ create_ganesha_ha_conf(servers, vips, tmp_ha_conf)
+
+ # Check whether ganesha-ha.conf file is created
+ if not os.path.isfile(tmp_ha_conf):
+ g.log.error("Failed to create ganesha-ha.conf")
+ return False
+
+ # Cluster auth setup
+ ret = cluster_auth_setup(servers)
+ if not ret:
+ g.log.error("Failed to configure cluster services")
+ return False
+
+ # Create nfs-ganesha directory in shared storage
+ dpath = '/var/run/gluster/shared_storage/nfs-ganesha'
+ mkdir(ganesha_mnode, dpath)
+
+ # Copy the config files to shared storage
+ cmd = 'cp -p /etc/ganesha/ganesha.conf %s/' % dpath
+ ret, _, _ = g.run(ganesha_mnode, cmd)
+ if ret != 0:
+ g.log.error("Failed to copy ganesha.conf to %s/", dpath)
+ return False
+
+ g.upload(ganesha_mnode, tmp_ha_conf, '%s/' % dpath)
+
+ # Create backup of ganesha-ha.conf file in ganesha_mnode
+ g.upload(ganesha_mnode, tmp_ha_conf, '/etc/ganesha/')
+
+ # Enabling ganesha
+ g.log.info("Enable nfs-ganesha")
+ ret, _, _ = enable_nfs_ganesha(ganesha_mnode)
+
+ if ret != 0:
+ g.log.error("Failed to enable ganesha")
+ return False
+
+ g.log.info("Successfully created ganesha cluster")
+
+ # pcs status output
+ _, _, _ = g.run(ganesha_mnode, "pcs status")
+
+ return True
+
+
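
An illustrative invocation (hostnames and VIPs below are placeholders; the helper pairs them positionally when generating ganesha-ha.conf):

    from glustolibs.gluster.nfs_ganesha_ops import create_nfs_ganesha_cluster

    servers = ['host1', 'host2', 'host3', 'host4']             # placeholders
    vips = ['10.70.0.101', '10.70.0.102', '10.70.0.103',
            '10.70.0.104']                                     # placeholders
    if not create_nfs_ganesha_cluster(servers, vips):
        raise RuntimeError("Failed to create the nfs-ganesha cluster")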
+def ganesha_server_firewall_settings(servers):
+ """
+ Do firewall settings for ganesha
+
+ Args:
+ servers(list): Hostname of ganesha nodes
+ Returns:
+ True(bool): If successfully set the firewall settings
+ False(bool): If failed to do firewall settings
+ """
+ services = ['nfs', 'rpc-bind', 'high-availability', 'nlm', 'mountd',
+ 'rquota']
+
+ ret = add_services_to_firewall(servers, services, True)
+ if not ret:
+ g.log.error("Failed to set firewall zone permanently on ganesha nodes")
+ return False
+
+ for server in servers:
+ ret, _, _ = g.run(server, "firewall-cmd --add-port=662/tcp "
+ "--add-port=662/udp")
+ if ret != 0:
+ g.log.error("Failed to add firewall port in %s", server)
+ return False
+ ret, _, _ = g.run(server, "firewall-cmd --add-port=662/tcp "
+ "--add-port=662/udp --permanent")
+ if ret != 0:
+ g.log.error("Failed to add firewall port permanently in %s",
+ server)
+ return False
+ return True
+
+
+def ganesha_client_firewall_settings(clients):
+ """
+ Do firewall settings in clients
+
+ Args:
+ clients(list): List of clients
+ Returns:
+ True(bool): If successfully set the firewall settings
+ False(bool): If failed to do firewall settings
+ """
+ for client in clients:
+ _, zone_name, _ = g.run(client,
+ "firewall-cmd --get-active-zones | head -n 1")
+ if not zone_name:
+ g.log.error("Failed to get active zone name in %s", client)
+ return False
+
+ zone_name = zone_name.strip()
+ ret, _, _ = g.run(client, "firewall-cmd --zone=%s "
+ "--add-port=662/tcp --add-port=662/udp "
+ "--add-port=32803/tcp --add-port=32769/udp "
+ "--add-port=2049/udp" % zone_name)
+ if ret != 0:
+ g.log.error("Failed to set firewall ports in %s", client)
+ return False
+
+ ret, _, _ = g.run(client, "firewall-cmd --zone=%s "
+ "--add-port=662/tcp --add-port=662/udp "
+ "--add-port=32803/tcp --add-port=32769/udp "
+ "--add-port=2049/udp"
+ " --permanent" % zone_name)
+ if ret != 0:
+ g.log.error("Failed to set firewall ports permanently in %s",
+ client)
+ return False
+ return True
+
+
+def create_nfs_passwordless_ssh(mnode, gnodes, guser='root'):
+ """
+ Enable key-based SSH authentication without password on all the HA nodes
+
+ Args:
+ mnode(str): Hostname of ganesha maintenance node.
+ gnodes(list): Hostnames of all ganesha nodes including the
+ maintenance node
+ guser(str): User for setting up password-less ssh
+ Returns:
+ True(bool): On success
+ False(bool): On failure
+ """
+ loc = '/var/lib/glusterd/nfs'
+ # Generate key on one node if not already present
+ ret, _, _ = g.run(mnode, "test -e %s/secret.pem" % loc)
+ if ret != 0:
+ cmd = "yes n | ssh-keygen -f %s/secret.pem -t rsa -N ''" % loc
+ g.log.info("Generating public key on %s", mnode)
+ ret, _, _ = g.run(mnode, cmd)
+ if ret != 0:
+ g.log.error("Failed to generate ssh key")
+ return False
+
+ # Deploy the generated public key from mnode to all the nodes
+ # (including mnode)
+ g.log.info("Deploying the generated public key from %s to all the nodes",
+ mnode)
+ for node in gnodes:
+ cmd = "ssh-copy-id -i %s/secret.pem.pub %s@%s" % (loc, guser, node)
+ ret, _, _ = g.run(mnode, cmd)
+ if ret != 0:
+ g.log.error("Failed to deploy the public key from %s to %s",
+ mnode, node)
+ return False
+
+ # Copy the ssh key pair from mnode to all the nodes in the Ganesha-HA
+ # cluster
+ g.log.info("Copy the ssh key pair from %s to other nodes in the "
+ "Ganesha-HA cluster" % mnode)
+ for node in gnodes:
+ if node != mnode:
+ cmd = ("scp -i %s/secret.pem %s/secret.* %s@%s:%s/"
+ % (loc, loc, guser, node, loc))
+ ret, _, _ = g.run(mnode, cmd)
+ if ret != 0:
+ g.log.error("Failed to copy the ssh key pair from %s to %s",
+ mnode, node)
+ return False
+
+ return True
+
+
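
Illustrative usage; note that mnode must itself appear in gnodes, since the key pair generated under /var/lib/glusterd/nfs is distributed to every node (hostnames are placeholders):

    from glustolibs.gluster.nfs_ganesha_ops import create_nfs_passwordless_ssh

    # host1 generates secret.pem once, copies the public key to all nodes
    # with ssh-copy-id, then scp's the key pair to the remaining nodes.
    ret = create_nfs_passwordless_ssh(mnode='host1',
                                      gnodes=['host1', 'host2'],
                                      guser='root')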
+def create_ganesha_ha_conf(hostnames, vips, temp_ha_file):
+ """
+ Create temporary ganesha-ha.conf file
+
+ Args:
+ hostnames(list): Hostname of ganesha nodes
+ vips(list): VIPs to be assigned, one per node
+ temp_ha_file(str): temporary local file to create ganesha-ha config
+ """
+ hosts = ','.join(hostnames)
+
+ with open(temp_ha_file, 'wb') as fhand:
+ fhand.write('HA_NAME="ganesha-ha-360"\n')
+ fhand.write('HA_CLUSTER_NODES="%s"\n' % hosts)
+ for (hostname, vip) in zip(hostnames, vips):
+ fhand.write('VIP_%s="%s"\n' % (hostname, vip))
+
+
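
For example, given two placeholder nodes, a call like the following would produce the file content shown in the comments (derived directly from the write calls above):

    create_ganesha_ha_conf(['host1', 'host2'],
                           ['10.70.0.101', '10.70.0.102'],
                           '/tmp/ganesha-ha.conf')
    # /tmp/ganesha-ha.conf now reads:
    #   HA_NAME="ganesha-ha-360"
    #   HA_CLUSTER_NODES="host1,host2"
    #   VIP_host1="10.70.0.101"
    #   VIP_host2="10.70.0.102"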
+def cluster_auth_setup(servers):
+ """
+ Configuring the Cluster Services
+
+ Args:
+ servers(list): Hostname of ganesha nodes
+ Returns:
+ True(bool): If configuration of cluster services is successful
+ False(bool): If failed to configure cluster services
+ """
+ result = True
+ for node in servers:
+ # Enable pacemaker.service
+ ret, _, _ = g.run(node, "systemctl enable pacemaker.service")
+ if ret != 0:
+ g.log.error("Failed to enable pacemaker service in %s", node)
+
+ # Start pcsd
+ ret, _, _ = g.run(node, "systemctl start pcsd")
+ if ret != 0:
+ g.log.error("failed to start pcsd on %s", node)
+ return False
+
+ # Enable pcsd on the system
+ ret, _, _ = g.run(node, "systemctl enable pcsd")
+ if ret != 0:
+ g.log.error("Failed to enable pcsd in %s", node)
+
+ # Set a password for the user 'hacluster' on all the nodes
+ ret, _, _ = g.run(node, "echo hacluster | passwd --stdin hacluster")
+ if ret != 0:
+ g.log.error("unable to set password for hacluster on %s", node)
+ return False
+
+ # Perform cluster authentication between the nodes
+ for node in servers:
+ ret, _, _ = g.run(node, "pcs cluster auth %s -u hacluster -p "
+ "hacluster" % ' '.join(servers))
+ if ret != 0:
+ g.log.error("pcs cluster auth command failed on %s", node)
+ result = False
+ return result
+
+
+def configure_ports_on_servers(servers):
+ """
+ Define ports for statd service
+
+ Args:
+ servers(list): List of nodes where the port has to be set
+ Returns:
+ True(bool): On success
+ False(bool): On failure
+ """
+ cmd = "sed -i '/STATD_PORT/s/^#//' /etc/sysconfig/nfs"
+ for server in servers:
+ ret, _, _ = g.run(server, cmd)
+ if ret != 0:
+ g.log.error("Failed to set statd service port in %s", server)
+ return False
+
+ ret, _, _ = g.run(server, "systemctl restart nfs-config")
+ if ret != 0:
+ g.log.error("Failed to restart nfs-config in %s", server)
+ return False
+
+ ret, _, _ = g.run(server, "systemctl restart rpc-statd")
+ if ret != 0:
+ g.log.error("Failed to restart rpc-statd in %s", server)
+ return False
+ return True
+
+
+def configure_ports_on_clients(clients):
+ """
+ Define ports for statd service
+
+ Args:
+ clients(list): List of clients where the port has to be set
+ Returns:
+ True(bool): On success
+ False(bool): On failure
+ """
+ for client in clients:
+ # Configure ports
+ cmd = ("sed -i -e '/STATD_PORT/s/^#//' -e '/LOCKD_TCPPORT/s/^#//' "
+ "-e '/LOCKD_UDPPORT/s/^#//' /etc/sysconfig/nfs")
+ ret, _, _ = g.run(client, cmd)
+ if ret != 0:
+ g.log.error("Failed to edit /etc/sysconfig/nfs file in %s",
+ client)
+ return False
+
+ ret, _, _ = g.run(client, "systemctl restart nfs-config")
+ if ret != 0:
+ g.log.error("Failed to restart nfs-config in %s", client)
+ return False
+
+ ret, _, _ = g.run(client, "systemctl restart rpc-statd")
+ if ret != 0:
+ g.log.error("Failed to restart rpc-statd in %s", client)
+ return False
+
+ ret, _, _ = g.run(client, "systemctl restart nfslock")
+ if ret != 0:
+ g.log.error("Failed to restart nfslock in %s", client)
+ return False
+ return True
+
+
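
Both port helpers only uncomment lines that already exist in /etc/sysconfig/nfs, so the resulting values come from that file's stock template rather than from this library. A sketch, assuming the default nfs-utils template:

    ret = configure_ports_on_clients(['client1', 'client2'])  # placeholders
    # With the stock /etc/sysconfig/nfs template (an assumption), the
    # uncommented lines are typically:
    #   STATD_PORT=662
    #   LOCKD_TCPPORT=32803
    #   LOCKD_UDPPORT=32769
    # which lines up with the 662/32803/32769 ports opened by the
    # firewall helpers above.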
+def refresh_config(mnode, volname):
+ """
+ Run refresh-config for exported volume
+
+ Args:
+ mnode(str): IP/hostname of one node in the cluster
+ volname(str): Volume name for which refresh-config has to be done
+ Returns:
+ True(bool): On success
+ False(bool): On failure
+ """
+ cmd = ("/usr/libexec/ganesha/ganesha-ha.sh --refresh-config /var/run/"
+ "gluster/shared_storage/nfs-ganesha %s" % volname)
+
+ ret, _, _ = g.run(mnode, cmd)
+ if ret != 0:
+ g.log.error("Error in running the refresh-config script for %s"
+ % volname)
+ return False
+ g.log.info("refresh-config script successfully ran for %s " % volname)
+ return True
+
+
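
A one-line illustrative call, assuming the module-level g import and a placeholder node and volume name:

    # Re-runs the export for 'testvol' via ganesha-ha.sh --refresh-config
    # against the shared-storage config directory.
    if not refresh_config('host1.example.com', 'testvol'):
        g.log.error("refresh-config failed for testvol")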
+def set_root_squash(mnode, volname, squash=True, do_refresh_config=True):
+ """
+ Modify volume export file to enable or disable root squash
+
+ Args:
+ mnode(str): IP/hostname of one node in the cluster
+ volname(str): Volume for which root squash has to be set
+ squash(bool): 'True' to enable and 'False' to disable root squash
+ do_refresh_config(bool): Value to decide refresh-config has to be
+ executed or not after modifying export file
+ Returns:
+ True(bool): On success
+ False(bool): On failure
+ """
+ if squash:
+ cmd = ("sed -i s/'Squash=.*'/'Squash=\"Root_squash\";'/g /var/run/"
+ "gluster/shared_storage/nfs-ganesha/exports/export.%s.conf"
+ % volname)
+ ret, _, _ = g.run(mnode, cmd)
+ if ret != 0:
+ g.log.error("Error in editing the export file of %s" % volname)
+ return False
+ g.log.info("Edited the export file of volume %s successfully to "
+ "enable root squash" % volname)
+ else:
+ cmd = ("sed -i s/'Squash=.*'/'Squash=\"No_root_squash\";'/g /var/"
+ "run/gluster/shared_storage/nfs-ganesha/exports/"
+ "export.%s.conf" % volname)
+ ret, _, _ = g.run(mnode, cmd)
+ if ret != 0:
+ g.log.error("Error in editing the export file of %s" % volname)
+ return False
+ g.log.info("Edited the export file of volume %s successfully to "
+ "enable root squash" % volname)
+
+ if do_refresh_config:
+ return refresh_config(mnode, volname)
+ return True
+
+
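
As an illustrative before/after (node and volume names are placeholders; the "before" value is just one example the sed pattern would match):

    set_root_squash('host1.example.com', 'testvol', squash=True)
    # Rewrites the Squash line in /var/run/gluster/shared_storage/
    # nfs-ganesha/exports/export.testvol.conf, e.g.:
    #   before: Squash="No_root_squash";
    #   after:  Squash="Root_squash";
    # and then applies it with refresh_config() by default.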
+def set_acl(mnode, volname, acl=True, do_refresh_config=True):
+ """
+ Modify volume export file to enable or disable ACL
+
+ Args:
+ mnode(str): IP/hostname of one node in the cluster
+ volname(str): Volume for which ACL has to be set
+ acl(bool): 'True' to enable and 'False' to disable ACL
+ do_refresh_config(bool): Value to decide refresh-config has to be
+ executed or not after modifying export file
+ Returns:
+ True(bool): On success
+ False(bool): On failure
+ """
+ if acl:
+ cmd = ("sed -i s/'Disable_ACL = .*'/'Disable_ACL = false;'/g /var"
+ "/run/gluster/shared_storage/nfs-ganesha/exports/"
+ "export.%s.conf" % volname)
+ ret, _, _ = g.run(mnode, cmd)
+ if ret != 0:
+ g.log.error("Error in editing the export file of %s" % volname)
+ return False
+ g.log.info("Edited the export file of volume %s successfully to "
+ "enable acl " % volname)
+ else:
+ cmd = ("sed -i s/'Disable_ACL = .*'/'Disable_ACL = true;'/g /var/"
+ "run/gluster/shared_storage/nfs-ganesha/exports/"
+ "export.%s.conf" % volname)
+ ret, _, _ = g.run(mnode, cmd)
+ if ret != 0:
+ g.log.error("Error in editing the export file of %s" % volname)
+ return False
+ g.log.info("Edited the export file of volume %s successfully to "
+ "disable acl " % volname)
+
+ if do_refresh_config:
+ return refresh_config(mnode, volname)
+ return True
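
When toggling several export options at once, the per-call refresh can be skipped and applied once at the end, as in this sketch (placeholders as above):

    # Batch two export-file edits behind a single refresh-config run.
    if (set_acl('host1.example.com', 'testvol', acl=True,
                do_refresh_config=False) and
            set_root_squash('host1.example.com', 'testvol', squash=True,
                            do_refresh_config=False)):
        refresh_config('host1.example.com', 'testvol')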