summaryrefslogtreecommitdiffstats
path: root/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_ops.py
diff options
context:
space:
mode:
authorJilju Joy <jijoy@redhat.com>2018-12-17 16:27:47 +0530
committerAkarsha Rai <akrai@redhat.com>2019-04-15 06:46:19 +0000
commitd76641e56a1cc6edd710675486a492d958a9697a (patch)
treeb5c0332ae3c991d74abf14b90dafeeecc6d7f5de /glustolibs-gluster/glustolibs/gluster/nfs_ganesha_ops.py
parent49fa677ae7978b8e7c3c0c02e1e3b6b15406fd08 (diff)
Avoid using gdeploy for nfs-ganesha setup, teardown, acl and root-squash libs
* Removed dependency on gdeploy for setup and teardown of ganesha cluster * Correction of pylint errors * Added new method to set acl * Added new method to set root-squash * Added new method for refresh-config * Removed setUp and tearDown methods from NfsGaneshaClusterSetupClass Change-Id: I1266fe7c09e1fed148ca222712e15932a5ad928c
Diffstat (limited to 'glustolibs-gluster/glustolibs/gluster/nfs_ganesha_ops.py')
-rw-r--r--glustolibs-gluster/glustolibs/gluster/nfs_ganesha_ops.py736
1 files changed, 496 insertions, 240 deletions
diff --git a/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_ops.py b/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_ops.py
index f589299..1d48a73 100644
--- a/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_ops.py
+++ b/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_ops.py
@@ -15,73 +15,25 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# pylint: disable=too-many-lines
"""
Description: Library for nfs ganesha operations.
Pre-requisite: Please install gdeploy package on the glusto-tests
management node.
"""
-from glusto.core import Glusto as g
import os
+from glusto.core import Glusto as g
+from glustolibs.gluster.glusterdir import mkdir
+from glustolibs.gluster.lib_utils import add_services_to_firewall
+from glustolibs.gluster.shared_storage_ops import enable_shared_storage
GDEPLOY_CONF_DIR = "/usr/share/glustolibs/gdeploy_configs/"
-def create_nfs_ganesha_cluster(servers, vips):
- """Creates nfs ganesha cluster using gdeploy
-
- Args:
- servers (list): Nodes in which nfs-ganesha cluster will be created.
- vips (list): virtual IPs of each servers mentioned in 'servers'
- param.
-
- Returns:
- bool : True on successfully creating nfs-ganesha cluster.
- False otherwise
-
- Example:
- create_nfs_ganesha_cluster(servers, vips)
- """
-
- conf_file = "create_nfs_ganesha_cluster.jinja"
- gdeploy_config_file = GDEPLOY_CONF_DIR + conf_file
- tmp_gdeploy_config_file = ("/tmp/" + os.path.splitext(conf_file)[0] +
- ".conf")
-
- values_to_substitute_in_template = {'servers': servers,
- 'vips': vips}
-
- ret = g.render_template(gdeploy_config_file,
- values_to_substitute_in_template,
- tmp_gdeploy_config_file)
- if not ret:
- g.log.error("Failed to substitute values in %s file"
- % tmp_gdeploy_config_file)
- return False
-
- cmd = "gdeploy -c " + tmp_gdeploy_config_file
- retcode, stdout, stderr = g.run_local(cmd)
- if retcode != 0:
- g.log.error("Failed to execute gdeploy cmd %s for creating nfs "
- "ganesha cluster" % cmd)
- g.log.error("gdeploy console output for creating nfs-ganesha "
- "cluster: %s" % stderr)
-
- return False
-
- g.log.info("gdeploy output for creating nfs-ganesha cluster: %s"
- % stdout)
-
- # pcs status output
- _, _, _ = g.run(servers[0], "pcs status")
-
- # Removing the gdeploy conf file from /tmp
- os.remove(tmp_gdeploy_config_file)
- return True
-
-
def teardown_nfs_ganesha_cluster(servers, force=False):
- """Teardown nfs ganesha cluster using gdeploy
+ """
+ Teardown nfs ganesha cluster
Args:
servers (list): Nodes in nfs-ganesha cluster to teardown entire
@@ -96,46 +48,21 @@ def teardown_nfs_ganesha_cluster(servers, force=False):
Example:
teardown_nfs_ganesha_cluster(servers)
"""
-
- conf_file = "teardown_nfs_ganesha_cluster.jinja"
- gdeploy_config_file = GDEPLOY_CONF_DIR + conf_file
- tmp_gdeploy_config_file = ("/tmp/" + os.path.splitext(conf_file)[0] +
- ".conf")
-
- values_to_substitute_in_template = {'servers': servers}
-
- ret = g.render_template(gdeploy_config_file,
- values_to_substitute_in_template,
- tmp_gdeploy_config_file)
- if not ret:
- g.log.error("Failed to substitute values in %s file"
- % tmp_gdeploy_config_file)
- return False
-
- cmd = "gdeploy -c " + tmp_gdeploy_config_file
- retcode, stdout, stderr = g.run_local(cmd)
- if retcode != 0:
- g.log.error("Failed to execute gdeploy cmd %s for teardown nfs "
- "ganesha cluster" % cmd)
- g.log.error("gdeploy console output for teardown nfs-ganesha "
- "cluster: %s" % stderr)
-
- return False
-
- g.log.info("gdeploy output for teardown nfs-ganesha cluster: %s"
- % stdout)
-
- # Removing gdeploy conf file from /tmp
- os.remove(tmp_gdeploy_config_file)
-
if force:
g.log.info("Executing force cleanup...")
for server in servers:
cmd = ("/usr/libexec/ganesha/ganesha-ha.sh --teardown "
"/var/run/gluster/shared_storage/nfs-ganesha")
_, _, _ = g.run(server, cmd)
+ cmd = ("/usr/libexec/ganesha/ganesha-ha.sh --cleanup /var/run/"
+ "gluster/shared_storage/nfs-ganesha")
+ _, _, _ = g.run(server, cmd)
_, _, _ = stop_nfs_ganesha_service(server)
-
+ return True
+ ret, _, _ = disable_nfs_ganesha(servers[0])
+ if ret != 0:
+ g.log.error("Nfs-ganesha disable failed")
+ return False
return True
@@ -202,7 +129,7 @@ def delete_node_from_nfs_ganesha_cluster(servers, node_to_delete):
Args:
servers (list): Nodes of existing nfs-ganesha cluster.
- node_to_delete (str): Node to delete from existing nfs-ganesha cluster.
+ node_to_delete (str): Node to delete from existing nfs-ganesha cluster
Returns:
bool : True on successfully creating nfs-ganesha cluster.
@@ -458,90 +385,6 @@ def update_volume_export_configuration(mnode, volname, config_to_update):
return True
-def enable_root_squash(mnode, volname):
- """
- Enable root squash for the given volume.
-
- Args:
- mnode (str): Node in which cmd command will
- be executed.
- volname (str): volume name
-
- Returns:
- bool : True on successfully enabling root squash on
- nfs-ganesha volume. False otherwise
-
- Example:
- enable_root_squash(mnode, volname)
- """
-
- config_to_update = "Squash=\"Root_squash\";"
- return update_volume_export_configuration(mnode, volname, config_to_update)
-
-
-def disable_root_squash(mnode, volname):
- """
- Disable root squash for the given volume.
-
- Args:
- mnode (str): Node in which cmd command will
- be executed.
- volname (str): volume name
-
- Returns:
- bool : True on successfully disabling root squash on
- nfs-ganesha volume. False otherwise
-
- Example:
- disable_root_squash(mnode, volname)
- """
-
- config_to_update = "Squash=\"No_root_squash\";"
- return update_volume_export_configuration(mnode, volname, config_to_update)
-
-
-def enable_acl(mnode, volname):
- """
- Enable acl for the given volume.
-
- Args:
- mnode (str): Node in which cmd command will
- be executed.
- volname (str): volume name
-
- Returns:
- bool : True on successfully enabling acl on
- nfs-ganesha volume. False otherwise
-
- Example:
- enable_acl(mnode, volname)
- """
-
- config_to_update = "Disable_ACL = false;"
- return update_volume_export_configuration(mnode, volname, config_to_update)
-
-
-def disable_acl(mnode, volname):
- """
- Disable acl for the given volume.
-
- Args:
- mnode (str): Node in which cmd command will
- be executed.
- volname (str): volume name
-
- Returns:
- bool : True on successfully disabling acl on
- nfs-ganesha volume. False otherwise
-
- Example:
- disable_acl(mnode, volname)
- """
-
- config_to_update = "Disable_ACL = true;"
- return update_volume_export_configuration(mnode, volname, config_to_update)
-
-
def is_nfs_ganesha_cluster_in_healthy_state(mnode):
"""
Checks whether nfs ganesha cluster is in healthy state.
@@ -562,7 +405,7 @@ def is_nfs_ganesha_cluster_in_healthy_state(mnode):
"/run/gluster/shared_storage/nfs-ganesha/ | grep " +
" 'Cluster HA Status' | cut -d ' ' -f 4 ")
- retcode, stdout, stderr = g.run(mnode, cmd)
+ retcode, stdout, _ = g.run(mnode, cmd)
if retcode != 0:
g.log.error("Failed to execute nfs-ganesha status command to check "
"if cluster is in healthy state")
@@ -578,7 +421,7 @@ def is_nfs_ganesha_cluster_in_healthy_state(mnode):
" 'Online' | grep -v 'Cluster' | cut -d ' ' -f 1 | " +
"sed s/'-cluster_ip-1'//g")
- retcode, stdout, stderr = g.run(mnode, cmd)
+ retcode, stdout, _ = g.run(mnode, cmd)
if retcode != 0:
g.log.error("Failed to execute nfs-ganesha status command to parse "
"for the cluster resources")
@@ -592,7 +435,7 @@ def is_nfs_ganesha_cluster_in_healthy_state(mnode):
" 'Online' | grep -v 'Cluster' | cut -d ' ' -f 1 | " +
"sed s/'-cluster_ip-1'//g")
- retcode, stdout, stderr = g.run(mnode, cmd)
+ retcode, stdout, _ = g.run(mnode, cmd)
if retcode != 0:
g.log.error("Failed to execute nfs-ganesha status command to parse "
"for the hostnames in cluster")
@@ -601,12 +444,12 @@ def is_nfs_ganesha_cluster_in_healthy_state(mnode):
host_list = stdout.split("\n")
host_list = list(filter(None, host_list))
- if ((cluster_list != []) and (cluster_list == host_list)):
+ if (cluster_list != []) and (cluster_list == host_list):
g.log.info("nfs ganesha cluster is in HEALTHY state")
return True
- else:
- g.log.error("nfs ganesha cluster is not in HEALTHY state")
- return False
+
+ g.log.error("nfs ganesha cluster is not in HEALTHY state")
+ return False
def is_nfs_ganesha_cluster_in_failover_state(mnode, failed_nodes):
@@ -631,7 +474,7 @@ def is_nfs_ganesha_cluster_in_failover_state(mnode, failed_nodes):
"/run/gluster/shared_storage/nfs-ganesha/ | grep " +
" 'Cluster HA Status' | cut -d ' ' -f 4 ")
- retcode, stdout, stderr = g.run(mnode, cmd)
+ retcode, stdout, _ = g.run(mnode, cmd)
if retcode != 0:
g.log.error("Failed to execute nfs-ganesha status command to check "
"if cluster is in failover state")
@@ -647,7 +490,7 @@ def is_nfs_ganesha_cluster_in_failover_state(mnode, failed_nodes):
" 'Online' | grep -v 'Cluster' | cut -d ' ' -f 1 | " +
"sed s/'-cluster_ip-1'//g")
- retcode, stdout, stderr = g.run(mnode, cmd)
+ retcode, stdout, _ = g.run(mnode, cmd)
if retcode != 0:
g.log.error("Failed to execute nfs-ganesha status command to parse "
"for the cluster resources")
@@ -661,7 +504,7 @@ def is_nfs_ganesha_cluster_in_failover_state(mnode, failed_nodes):
" 'Online' | grep -v 'Cluster' | cut -d ' ' -f 2 | " +
"sed s/'-cluster_ip-1'//g")
- retcode, stdout, stderr = g.run(mnode, cmd)
+ retcode, stdout, _ = g.run(mnode, cmd)
if retcode != 0:
g.log.error("Failed to execute nfs-ganesha status command to parse "
"for the hostnames in cluster")
@@ -674,13 +517,14 @@ def is_nfs_ganesha_cluster_in_failover_state(mnode, failed_nodes):
for cluster_node, host_node in zip(cluster_list, host_list):
if cluster_node in failed_nodes:
if cluster_node == host_node:
- g.log.error("failover status: failed node %s is not takenover "
- "by other node in nfs-ganesha cluster"
- % (cluster_node))
+ g.log.error("failover status: failed node %s isn't taken over"
+ " by other node in nfs-ganesha cluster" %
+ cluster_node)
ret = False
else:
g.log.info("failover status: failed node %s is successfully "
- "failovered to node %s" % (cluster_node, host_node))
+ "failovered to node %s" %
+ (cluster_node, host_node))
else:
if cluster_node != host_node:
g.log.error("Unexpected. Other nodes are in failover state. "
@@ -710,7 +554,7 @@ def is_nfs_ganesha_cluster_in_bad_state(mnode):
"/run/gluster/shared_storage/nfs-ganesha/ | grep " +
" 'Cluster HA Status' | cut -d ' ' -f 4 ")
- retcode, stdout, stderr = g.run(mnode, cmd)
+ retcode, stdout, _ = g.run(mnode, cmd)
if retcode != 0:
g.log.error("Failed to execute nfs-ganesha status command to check "
"if cluster is in bad state")
@@ -743,7 +587,7 @@ def is_nfs_ganesha_cluster_exists(mnode):
" 'Online' | grep -v 'Cluster' | cut -d ' ' -f 1 | " +
"sed s/'-cluster_ip-1'//g")
- retcode, stdout, stderr = g.run(mnode, cmd)
+ retcode, stdout, _ = g.run(mnode, cmd)
if retcode != 0:
g.log.error("Failed to execute nfs-ganesha status command to parse "
"for the cluster resources")
@@ -755,58 +599,9 @@ def is_nfs_ganesha_cluster_exists(mnode):
if cluster_list != []:
g.log.info("nfs ganesha cluster exists")
return True
- else:
- g.log.error("nfs ganesha cluster not exists")
- return False
-
-
-def set_nfs_ganesha_client_configuration(client_nodes):
- """Sets pre-requisites in the client machines to
- mount with nfs-ganesha.
-
- Args:
- client_nodes (list): Client nodes in which the prerequisite
- are done to do nfs-ganesha mount.
-
- Returns:
- bool : True on successfully creating nfs-ganesha cluster.
- False otherwise
-
- Example:
- set_nfs_ganesha_client_configuration(client_nodes)
- """
-
- conf_file = "nfs_ganesha_client_configuration.jinja"
- gdeploy_config_file = GDEPLOY_CONF_DIR + conf_file
- tmp_gdeploy_config_file = ("/tmp/" + os.path.splitext(conf_file)[0] +
- ".conf")
- values_to_substitute_in_template = {'servers': client_nodes}
-
- ret = g.render_template(gdeploy_config_file,
- values_to_substitute_in_template,
- tmp_gdeploy_config_file)
- if not ret:
- g.log.error("Failed to substitute values in %s file"
- % tmp_gdeploy_config_file)
- return False
-
- cmd = "gdeploy -c " + tmp_gdeploy_config_file
- retcode, stdout, stderr = g.run_local(cmd)
- if retcode != 0:
- g.log.error("Failed to execute gdeploy cmd %s for setting nfs "
- "ganesha client configuration" % cmd)
- g.log.error("gdeploy console output for setting nfs-ganesha "
- "client configuration: %s" % stderr)
-
- return False
-
- g.log.info("gdeploy output for setting nfs-ganesha client "
- "configuration: %s" % stdout)
-
- # Removing the gdeploy conf file from /tmp
- os.remove(tmp_gdeploy_config_file)
- return True
+ g.log.error("nfs ganesha cluster not exists")
+ return False
def stop_nfs_ganesha_service(mnode):
@@ -907,3 +702,464 @@ def start_pacemaker_service(mnode):
cmd = "systemctl start pacemaker"
return g.run(mnode, cmd)
+
+
+def create_nfs_ganesha_cluster(servers, vips):
+ """
+ Creating a ganesha HA cluster
+
+ Args:
+ servers(list): Hostname of ganesha nodes
+ vips(list): VIPs that has to be assigned for each nodes
+ Returns:
+ True(bool): If configuration of ganesha cluster is success
+ False(bool): If failed to configure ganesha cluster
+ """
+ # pylint: disable=too-many-return-statements
+ ganesha_mnode = servers[0]
+
+ # Configure ports in ganesha servers
+ g.log.info("Defining statd service ports")
+ ret = configure_ports_on_servers(servers)
+ if not ret:
+ g.log.error("Failed to set statd service ports on nodes.")
+ return False
+
+ # Firewall settings for nfs-ganesha
+ ret = ganesha_server_firewall_settings(servers)
+ if not ret:
+ g.log.error("Firewall settings for nfs ganesha has failed.")
+ return False
+ g.log.info("Firewall settings for nfs ganesha was success.")
+
+ # Enable shared storage if not present
+ ret, _, _ = g.run(ganesha_mnode,
+ "gluster v list | grep 'gluster_shared_storage'")
+ if ret != 0:
+ if not enable_shared_storage(ganesha_mnode):
+ g.log.error("Failed to enable shared storage")
+ return False
+ g.log.info("Enabled gluster shared storage.")
+ else:
+ g.log.info("Shared storage is already enabled.")
+
+ # Enable the glusterfssharedstorage.service and nfs-ganesha service
+ for server in servers:
+ cmd = "systemctl enable glusterfssharedstorage.service"
+ ret, _, _ = g.run(server, cmd)
+ if ret != 0:
+ g.log.error("Failed to enable glusterfssharedstorage.service "
+ "on %s", server)
+ return False
+
+ ret, _, _ = g.run(server, "systemctl enable nfs-ganesha")
+ if ret != 0:
+ g.log.error("Failed to enable nfs-ganesha service on %s", server)
+ return False
+
+ # Password less ssh for nfs
+ ret = create_nfs_passwordless_ssh(ganesha_mnode, servers)
+ if not ret:
+ g.log.error("Password less ssh between nodes failed.")
+ return False
+ g.log.info("Password less ssh between nodes successful.")
+
+ # Create ganesha-ha.conf file
+ tmp_ha_conf = "/tmp/ganesha-ha.conf"
+ create_ganesha_ha_conf(servers, vips, tmp_ha_conf)
+
+ # Check whether ganesha-ha.conf file is created
+ if not os.path.isfile(tmp_ha_conf):
+ g.log.error("Failed to create ganesha-ha.conf")
+ return False
+
+ # Cluster auth setup
+ ret = cluster_auth_setup(servers)
+ if not ret:
+ g.log.error("Failed to configure cluster services")
+ return False
+
+ # Create nfs-ganesha directory in shared storage
+ dpath = '/var/run/gluster/shared_storage/nfs-ganesha'
+ mkdir(ganesha_mnode, dpath)
+
+ # Copy the config files to shared storage
+ cmd = 'cp -p /etc/ganesha/ganesha.conf %s/' % dpath
+ ret, _, _ = g.run(ganesha_mnode, cmd)
+ if ret != 0:
+ g.log.error("Failed to copy ganesha.conf to %s/", dpath)
+ return False
+
+ g.upload(ganesha_mnode, tmp_ha_conf, '%s/' % dpath)
+
+ # Create backup of ganesha-ha.conf file in ganesha_mnode
+ g.upload(ganesha_mnode, tmp_ha_conf, '/etc/ganesha/')
+
+ # Enabling ganesha
+ g.log.info("Enable nfs-ganesha")
+ ret, _, _ = enable_nfs_ganesha(ganesha_mnode)
+
+ if ret != 0:
+ g.log.error("Failed to enable ganesha")
+ return False
+
+ g.log.info("Successfully created ganesha cluster")
+
+ # pcs status output
+ _, _, _ = g.run(ganesha_mnode, "pcs status")
+
+ return True
+
+
+def ganesha_server_firewall_settings(servers):
+ """
+ Do firewall settings for ganesha
+
+ Args:
+ servers(list): Hostname of ganesha nodes
+ Returns:
+ True(bool): If successfully set the firewall settings
+ False(bool): If failed to do firewall settings
+ """
+ services = ['nfs', 'rpc-bind', 'high-availability', 'nlm', 'mountd',
+ 'rquota']
+
+ ret = add_services_to_firewall(servers, services, True)
+ if not ret:
+ g.log.error("Failed to set firewall zone permanently on ganesha nodes")
+ return False
+
+ for server in servers:
+ ret, _, _ = g.run(server, "firewall-cmd --add-port=662/tcp "
+ "--add-port=662/udp")
+ if ret != 0:
+ g.log.error("Failed to add firewall port in %s", server)
+ return False
+ ret, _, _ = g.run(server, "firewall-cmd --add-port=662/tcp "
+ "--add-port=662/udp --permanent")
+ if ret != 0:
+ g.log.error("Failed to add firewall port permanently in %s",
+ server)
+ return False
+ return True
+
+
+def ganesha_client_firewall_settings(clients):
+ """
+ Do firewall settings in clients
+
+ Args:
+ clients(list): List of clients
+ Returns:
+ True(bool): If successfully set the firewall settings
+ False(bool): If failed to do firewall settings
+ """
+ for client in clients:
+ _, zone_name, _ = g.run(client,
+ "firewall-cmd --get-active-zones | head -n 1")
+ if not zone_name:
+ g.log.error("Failed to get active zone name in %s", client)
+ return False
+
+ zone_name = zone_name.strip()
+ ret, _, _ = g.run(client, "firewall-cmd --zone=%s "
+ "--add-port=662/tcp --add-port=662/udp "
+ "--add-port=32803/tcp --add-port=32769/udp "
+ "--add-port=2049/udp" % zone_name)
+ if ret != 0:
+ g.log.error("Failed to set firewall ports in %s", client)
+ return False
+
+ ret, _, _ = g.run(client, "firewall-cmd --zone=%s "
+ "--add-port=662/tcp --add-port=662/udp "
+ "--add-port=32803/tcp --add-port=32769/udp "
+ "--add-port=2049/udp"
+ " --permanent" % zone_name)
+ if ret != 0:
+ g.log.error("Failed to set firewall ports permanently in %s",
+ client)
+ return False
+ return True
+
+
+def create_nfs_passwordless_ssh(mnode, gnodes, guser='root'):
+ """
+ Enable key-based SSH authentication without password on all the HA nodes
+
+ Args:
+ mnode(str): Hostname of ganesha maintenance node.
+        gnodes(list): Hostnames of all ganesha nodes including maintenance node
+ guser(str): User for setting password less ssh
+ Returns:
+ True(bool): On success
+ False(bool): On failure
+ """
+ loc = '/var/lib/glusterd/nfs'
+ # Generate key on one node if not already present
+ ret, _, _ = g.run(mnode, "test -e %s/secret.pem" % loc)
+ if ret != 0:
+ cmd = "yes n | ssh-keygen -f %s/secret.pem -t rsa -N ''" % loc
+ g.log.info("Generating public key on %s", mnode)
+ ret, _, _ = g.run(mnode, cmd)
+ if ret != 0:
+ g.log.error("Failed to generate ssh key")
+ return False
+
+ # Deploy the generated public key from mnode to all the nodes
+ # (including mnode)
+ g.log.info("Deploying the generated public key from %s to all the nodes",
+ mnode)
+ for node in gnodes:
+ cmd = "ssh-copy-id -i %s/secret.pem.pub %s@%s" % (loc, guser, node)
+ ret, _, _ = g.run(mnode, cmd)
+ if ret != 0:
+ g.log.error("Failed to deploy the public key from %s to %s",
+ mnode, node)
+ return False
+
+ # Copy the ssh key pair from mnode to all the nodes in the Ganesha-HA
+ # cluster
+ g.log.info("Copy the ssh key pair from %s to other nodes in the "
+ "Ganesha-HA cluster" % mnode)
+ for node in gnodes:
+ if node != mnode:
+ cmd = ("scp -i %s/secret.pem %s/secret.* %s@%s:%s/"
+ % (loc, loc, guser, node, loc))
+ ret, _, _ = g.run(mnode, cmd)
+ if ret != 0:
+ g.log.error("Failed to copy the ssh key pair from %s to %s",
+ mnode, node)
+ return False
+
+ return True
+
+
+def create_ganesha_ha_conf(hostnames, vips, temp_ha_file):
+ """
+ Create temporary ganesha-ha.conf file
+
+ Args:
+ hostnames(list): Hostname of ganesha nodes
+ vips(list): VIPs that has to be assigned for each nodes
+ temp_ha_file(str): temporary local file to create ganesha-ha config
+ """
+ hosts = ','.join(hostnames)
+
+ with open(temp_ha_file, 'wb') as fhand:
+ fhand.write('HA_NAME="ganesha-ha-360"\n')
+ fhand.write('HA_CLUSTER_NODES="%s"\n' % hosts)
+ for (hostname, vip) in zip(hostnames, vips):
+ fhand.write('VIP_%s="%s"\n' % (hostname, vip))
+
+
+def cluster_auth_setup(servers):
+ """
+ Configuring the Cluster Services
+
+ Args:
+ servers(list): Hostname of ganesha nodes
+ Returns:
+ True(bool): If configuration of cluster services is success
+ False(bool): If failed to configure cluster services
+ """
+ result = True
+ for node in servers:
+ # Enable pacemaker.service
+ ret, _, _ = g.run(node, "systemctl enable pacemaker.service")
+ if ret != 0:
+ g.log.error("Failed to enable pacemaker service in %s", node)
+
+ # Start pcsd
+ ret, _, _ = g.run(node, "systemctl start pcsd")
+ if ret != 0:
+ g.log.error("failed to start pcsd on %s", node)
+ return False
+
+ # Enable pcsd on the system
+ ret, _, _ = g.run(node, "systemctl enable pcsd")
+ if ret != 0:
+ g.log.error("Failed to enable pcsd in %s", node)
+
+ # Set a password for the user ‘hacluster’ on all the nodes
+ ret, _, _ = g.run(node, "echo hacluster | passwd --stdin hacluster")
+ if ret != 0:
+ g.log.error("unable to set password for hacluster on %s", node)
+ return False
+
+ # Perform cluster authentication between the nodes
+ for node in servers:
+ ret, _, _ = g.run(node, "pcs cluster auth %s -u hacluster -p "
+ "hacluster" % ' '.join(servers))
+ if ret != 0:
+ g.log.error("pcs cluster auth command failed on %s", node)
+ result = False
+ return result
+
+
+def configure_ports_on_servers(servers):
+ """
+ Define ports for statd service
+
+ Args:
+ servers(list): List of nodes where the port has to be set
+ Returns:
+ True(bool): On success
+ False(bool): On failure
+ """
+ cmd = "sed -i '/STATD_PORT/s/^#//' /etc/sysconfig/nfs"
+ for server in servers:
+ ret, _, _ = g.run(server, cmd)
+ if ret != 0:
+ g.log.error("Failed to set statd service port in %s", server)
+ return False
+
+ ret, _, _ = g.run(server, "systemctl restart nfs-config")
+ if ret != 0:
+ g.log.error("Failed to restart nfs-config in %s", server)
+ return False
+
+ ret, _, _ = g.run(server, "systemctl restart rpc-statd")
+ if ret != 0:
+ g.log.error("Failed to restart rpc-statd in %s", server)
+ return False
+ return True
+
+
+def configure_ports_on_clients(clients):
+ """
+ Define ports for statd service
+
+ Args:
+ clients(list): List of clients where the port has to be set
+ Returns:
+ True(bool): On success
+ False(bool): On failure
+ """
+ for client in clients:
+ # Configure ports
+ cmd = ("sed -i -e '/STATD_PORT/s/^#//' -e '/LOCKD_TCPPORT/s/^#//' "
+ "-e '/LOCKD_UDPPORT/s/^#//' /etc/sysconfig/nfs")
+ ret, _, _ = g.run(client, cmd)
+ if ret != 0:
+ g.log.error("Failed to edit /etc/sysconfig/nfs file in %s",
+ client)
+ return False
+
+ ret, _, _ = g.run(client, "systemctl restart nfs-config")
+ if ret != 0:
+ g.log.error("Failed to restart nfs-config in %s", client)
+ return False
+
+ ret, _, _ = g.run(client, "systemctl restart rpc-statd")
+ if ret != 0:
+ g.log.error("Failed to restart rpc-statd in %s", client)
+ return False
+
+ ret, _, _ = g.run(client, "systemctl restart nfslock")
+ if ret != 0:
+ g.log.error("Failed to restart nfslock in %s", client)
+ return False
+ return True
+
+
+def refresh_config(mnode, volname):
+ """
+ Run refresh-config for exported volume
+
+ Args:
+ mnode(str): Ip/hostname of one node in the cluster
+ volname(str): Volume name for which refresh-config has to be done
+ Returns:
+ True(bool): On success
+ False(bool): On failure
+ """
+ cmd = ("/usr/libexec/ganesha/ganesha-ha.sh --refresh-config /var/run/"
+ "gluster/shared_storage/nfs-ganesha %s" % volname)
+
+ ret, _, _ = g.run(mnode, cmd)
+ if ret != 0:
+ g.log.error("Error in running the refresh-config script for %s"
+ % volname)
+ return False
+ g.log.info("refresh-config script successfully ran for %s " % volname)
+ return True
+
+
+def set_root_squash(mnode, volname, squash=True, do_refresh_config=True):
+ """
+ Modify volume export file to enable or disable root squash
+
+ Args:
+ mnode(str): Ip/hostname of one node in the cluster
+ volname(str): Volume name for which refresh-config has to be done
+ squash(bool): 'True' to enable and 'False' to disable root squash
+ do_refresh_config(bool): Value to decide refresh-config has to be
+ executed or not after modifying export file
+ Returns:
+ True(bool): On success
+ False(bool): On failure
+ """
+ if squash:
+ cmd = ("sed -i s/'Squash=.*'/'Squash=\"Root_squash\";'/g /var/run/"
+ "gluster/shared_storage/nfs-ganesha/exports/export.%s.conf"
+ % volname)
+ ret, _, _ = g.run(mnode, cmd)
+ if ret != 0:
+ g.log.error("Error in editing the export file of %s" % volname)
+ return False
+ g.log.info("Edited the export file of volume %s successfully to "
+ "enable root squash" % volname)
+ else:
+ cmd = ("sed -i s/'Squash=.*'/'Squash=\"No_root_squash\";'/g /var/"
+ "run/gluster/shared_storage/nfs-ganesha/exports/"
+ "export.%s.conf" % volname)
+ ret, _, _ = g.run(mnode, cmd)
+ if ret != 0:
+ g.log.error("Error in editing the export file of %s" % volname)
+ return False
+        g.log.info("Edited the export file of volume %s successfully to "
+                   "disable root squash" % volname)
+
+ if do_refresh_config:
+ return refresh_config(mnode, volname)
+ return True
+
+
+def set_acl(mnode, volname, acl=True, do_refresh_config=True):
+ """
+ Modify volume export file to enable or disable ACL
+
+ Args:
+ mnode(str): Ip/hostname of one node in the cluster
+ volname(str): Volume name for which refresh-config has to be done
+ acl(bool): 'True' to enable and 'False' to disable ACL
+ do_refresh_config(bool): Value to decide refresh-config has to be
+ executed or not after modifying export file
+ Returns:
+ True(bool): On success
+ False(bool): On failure
+ """
+ if acl:
+ cmd = ("sed -i s/'Disable_ACL = .*'/'Disable_ACL = false;'/g /var"
+ "/run/gluster/shared_storage/nfs-ganesha/exports/"
+ "export.%s.conf" % volname)
+ ret, _, _ = g.run(mnode, cmd)
+ if ret != 0:
+ g.log.error("Error in editing the export file of %s" % volname)
+ return False
+ g.log.info("Edited the export file of volume %s successfully to "
+ "enable acl " % volname)
+ else:
+ cmd = ("sed -i s/'Disable_ACL = .*'/'Disable_ACL = true;'/g /var/"
+ "run/gluster/shared_storage/nfs-ganesha/exports/"
+ "export.%s.conf" % volname)
+ ret, _, _ = g.run(mnode, cmd)
+ if ret != 0:
+ g.log.error("Error in editing the export file of %s" % volname)
+ return False
+ g.log.info("Edited the export file of volume %s successfully to "
+ "disable acl " % volname)
+
+ if do_refresh_config:
+ return refresh_config(mnode, volname)
+ return True