summaryrefslogtreecommitdiffstats
path: root/glustolibs-gluster/glustolibs
diff options
context:
space:
mode:
Diffstat (limited to 'glustolibs-gluster/glustolibs')
-rw-r--r--glustolibs-gluster/glustolibs/gluster/nfs_ganesha_libs.py497
-rw-r--r--glustolibs-gluster/glustolibs/gluster/nfs_ganesha_ops.py909
2 files changed, 1406 insertions, 0 deletions
diff --git a/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_libs.py b/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_libs.py
new file mode 100644
index 000000000..23f187912
--- /dev/null
+++ b/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_libs.py
@@ -0,0 +1,497 @@
+#!/usr/bin/env python
+# Copyright (C) 2016-2017 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+ Description: nfs ganesha base classes.
+ Pre-requisite: Please install gdeploy package on the glusto-tests
+ management node.
+"""
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.nfs_ganesha_ops import (
+ is_nfs_ganesha_cluster_exists,
+ is_nfs_ganesha_cluster_in_healthy_state,
+ teardown_nfs_ganesha_cluster,
+ create_nfs_ganesha_cluster,
+ export_nfs_ganesha_volume,
+ unexport_nfs_ganesha_volume,
+ set_nfs_ganesha_client_configuration)
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass
+from glustolibs.gluster.exceptions import ExecutionError, ConfigError
+from glustolibs.gluster.peer_ops import peer_probe_servers, peer_status
+from glustolibs.gluster.volume_ops import volume_info
+from glustolibs.gluster.volume_libs import (setup_volume, cleanup_volume,
+ log_volume_info_and_status,
+ get_volume_options,
+ is_volume_exported)
+from glustolibs.gluster.mount_ops import create_mount_objs
+from glustolibs.io.utils import log_mounts_info, wait_for_io_to_complete
+from glustolibs.misc.misc_libs import upload_scripts
+import time
+import socket
+import re
+
+
class NfsGaneshaClusterSetupClass(GlusterBaseClass):
    """Creates and manages an nfs-ganesha cluster for test classes.

    Subclasses get a healthy nfs-ganesha cluster spanning the first
    ``num_of_nfs_ganesha_nodes`` servers from the config, with the
    client-side nfs-ganesha configuration applied and /etc/hosts entries
    populated in both directions.
    """

    @classmethod
    def _create_cluster(cls):
        """Create the nfs-ganesha cluster on the configured nodes/VIPs.

        Raises:
            ExecutionError: if cluster creation fails.
        """
        g.log.info("Creating nfs-ganesha cluster of %s nodes"
                   % str(cls.num_of_nfs_ganesha_nodes))
        g.log.info("Nfs-ganesha cluster node info: %s"
                   % cls.servers_in_nfs_ganesha_cluster)
        g.log.info("Nfs-ganesha cluster vip info: %s"
                   % cls.vips_in_nfs_ganesha_cluster)
        ret = create_nfs_ganesha_cluster(
            cls.servers_in_nfs_ganesha_cluster,
            cls.vips_in_nfs_ganesha_cluster)
        if not ret:
            raise ExecutionError("Failed to create nfs-ganesha cluster")

    @classmethod
    def _teardown_unhealthy_cluster(cls):
        """Tear down an existing, unhealthy nfs-ganesha cluster.

        The member list is recovered from ganesha-ha.conf on the shared
        storage of the first configured cluster node.

        Raises:
            ExecutionError: on connection failure, missing config file,
                or teardown failure.
        """
        g.log.info("Tearing down existing cluster which is not in "
                   "healthy state")
        ganesha_ha_file = ("/var/run/gluster/shared_storage/"
                           "nfs-ganesha/ganesha-ha.conf")

        g.log.info("Collecting server details of existing "
                   "nfs ganesha cluster")
        first_node = cls.servers_in_nfs_ganesha_cluster[0]
        conn = g.rpyc_get_connection(first_node, user="root")
        if conn is None:
            raise ExecutionError("Unable to get connection to 'root' "
                                 "of node %s" % first_node)
        # Close the rpyc connection on every exit path (the original
        # leaked it when ganesha-ha.conf was missing).
        try:
            if not conn.modules.os.path.exists(ganesha_ha_file):
                raise ExecutionError("Unable to locate %s"
                                     % ganesha_ha_file)
            with conn.builtin.open(ganesha_ha_file, "r") as fh:
                ganesha_ha_contents = fh.read()
        finally:
            g.rpyc_close_connection(host=first_node, user="root")

        # ganesha-ha.conf carries one 'VIP_<hostname>=<ip>' line per
        # cluster member; capture the hostnames.
        servers_in_existing_cluster = re.findall(r'VIP_(.*)=.*',
                                                 ganesha_ha_contents)

        ret = teardown_nfs_ganesha_cluster(
            servers_in_existing_cluster, force=True)
        if not ret:
            raise ExecutionError("Failed to teardown nfs "
                                 "ganesha cluster")
        g.log.info("Existing cluster got teardown successfully")

    @classmethod
    def _add_etc_hosts_entries(cls, nodes, hostnames):
        """Ensure every name in `hostnames` resolves on each node in
        `nodes` by appending an /etc/hosts entry when absent.

        Failures are logged, not raised (best effort, as before).
        """
        for node in nodes:
            for hostname in hostnames:
                cmd = ("if [ -z \"$(grep -R \"%s\" /etc/hosts)\" ]; then "
                       "echo \"%s %s\" >> /etc/hosts; fi"
                       % (hostname, socket.gethostbyname(hostname),
                          hostname))
                ret, _, _ = g.run(node, cmd)
                if ret != 0:
                    g.log.error("Failed to add entry of %s in "
                                "/etc/hosts of node %s"
                                % (hostname, node))

    @classmethod
    def setUpClass(cls):
        """Setup nfs-ganesha cluster for tests.

        Raises:
            ConfigError: if gdeploy is missing on the management node or
                nfs-ganesha is not enabled in the config.
            ExecutionError: if cluster setup, teardown or client
                configuration fails.
        """
        # gdeploy drives cluster creation/teardown; it must be present on
        # the glusto-tests management node before anything else.
        ret, _, _ = g.run_local("gdeploy --version")
        if ret != 0:
            raise ConfigError("Please install gdeploy to run the scripts")

        GlusterBaseClass.setUpClass.im_func(cls)

        # Check if enable_nfs_ganesha is set in config file
        if not cls.enable_nfs_ganesha:
            raise ConfigError("Please enable nfs ganesha in config")

        # Read num_of_nfs_ganesha_nodes from config file and create
        # nfs ganesha cluster accordingly
        cls.num_of_nfs_ganesha_nodes = int(cls.num_of_nfs_ganesha_nodes)
        cls.servers_in_nfs_ganesha_cluster = (
            cls.servers[:cls.num_of_nfs_ganesha_nodes])
        cls.vips_in_nfs_ganesha_cluster = (
            cls.vips[:cls.num_of_nfs_ganesha_nodes])

        # Reuse a healthy existing cluster; otherwise tear the stale one
        # down (if any) and create a fresh cluster.
        if is_nfs_ganesha_cluster_exists(
                cls.servers_in_nfs_ganesha_cluster[0]):
            if is_nfs_ganesha_cluster_in_healthy_state(
                    cls.servers_in_nfs_ganesha_cluster[0]):
                g.log.info("Nfs-ganesha Cluster exists and is in healthy "
                           "state. Skipping cluster creation...")
            else:
                g.log.info("Nfs-ganesha Cluster exists and is not in "
                           "healthy state.")
                cls._teardown_unhealthy_cluster()
                cls._create_cluster()
        else:
            cls._create_cluster()

        if is_nfs_ganesha_cluster_in_healthy_state(
                cls.servers_in_nfs_ganesha_cluster[0]):
            g.log.info("Nfs-ganesha Cluster exists is in healthy state")
        else:
            raise ExecutionError("Nfs-ganesha Cluster setup Failed")

        ret = set_nfs_ganesha_client_configuration(cls.clients)
        if not ret:
            raise ExecutionError("Failed to do client nfs ganesha "
                                 "configuration")

        # Make client names resolvable on every server and vice versa.
        cls._add_etc_hosts_entries(cls.servers, cls.clients)
        cls._add_etc_hosts_entries(cls.clients, cls.servers)

    def setUp(self):
        """setUp required for tests"""
        GlusterBaseClass.setUp.im_func(self)

    def tearDown(self):
        """tearDown required for tests"""
        GlusterBaseClass.tearDown.im_func(self)

    @classmethod
    def tearDownClass(cls, delete_nfs_ganesha_cluster=True):
        """Teardown nfs ganesha cluster.

        Kwargs:
            delete_nfs_ganesha_cluster (bool): skip cluster teardown when
                set to False.

        Raises:
            ExecutionError: if even the forced cleanup fails.
        """
        GlusterBaseClass.tearDownClass.im_func(cls)

        if delete_nfs_ganesha_cluster:
            ret = teardown_nfs_ganesha_cluster(
                cls.servers_in_nfs_ganesha_cluster)
            if not ret:
                g.log.error("Teardown got failed. Hence, cleaning up "
                            "nfs-ganesha cluster forcefully")
                ret = teardown_nfs_ganesha_cluster(
                    cls.servers_in_nfs_ganesha_cluster, force=True)
                if not ret:
                    raise ExecutionError("Force cleanup of nfs-ganesha "
                                         "cluster failed")
            g.log.info("Teardown nfs ganesha cluster succeeded")
        else:
            g.log.info("Skipping teardown nfs-ganesha cluster...")
+
+
class NfsGaneshaVolumeBaseClass(NfsGaneshaClusterSetupClass):
    """Sets up the nfs ganesha cluster, volume for testing purposes.
    """

    @classmethod
    def setUpClass(cls):
        """Setup volume, export it with nfs-ganesha and mount it on all
        clients.

        Raises:
            ExecutionError: if any setup step fails.
        """
        NfsGaneshaClusterSetupClass.setUpClass.im_func(cls)

        # Peer probe servers
        ret = peer_probe_servers(cls.mnode, cls.servers)
        if not ret:
            raise ExecutionError("Failed to peer probe servers")

        g.log.info("All peers are in connected state")

        # Peer Status from mnode
        peer_status(cls.mnode)

        # Mount the shared storage volume on each server, if not mounted.
        for server in cls.servers:
            mount_info = [
                {'protocol': 'glusterfs',
                 'mountpoint': '/run/gluster/shared_storage',
                 'server': server,
                 'client': {'host': server},
                 'volname': 'gluster_shared_storage',
                 'options': ''}]

            # create_mount_objs returns a list; keep the single mount
            # object. (The original formatted the failure message with
            # attributes of the list itself, raising AttributeError on
            # the error path.)
            mount_obj = create_mount_objs(mount_info)[0]
            if not mount_obj.is_mounted():
                ret = mount_obj.mount()
                if not ret:
                    raise ExecutionError("Unable to mount volume '%s:%s' "
                                         "on '%s:%s'"
                                         % (mount_obj.server_system,
                                            mount_obj.volname,
                                            mount_obj.client_system,
                                            mount_obj.mountpoint))

        # Setup Volume
        ret = setup_volume(mnode=cls.mnode,
                           all_servers_info=cls.all_servers_info,
                           volume_config=cls.volume, force=True)
        if not ret:
            raise ExecutionError("Setup volume %s failed" % cls.volume)
        time.sleep(10)

        # Export volume with nfs ganesha, if it is not exported already
        vol_option = get_volume_options(cls.mnode, cls.volname,
                                        option='ganesha.enable')
        if vol_option is None:
            raise ExecutionError("Failed to get ganesha.enable volume option "
                                 "for %s " % cls.volume)
        if vol_option['ganesha.enable'] != 'on':
            ret, _, _ = export_nfs_ganesha_volume(
                mnode=cls.mnode, volname=cls.volname)
            if ret != 0:
                raise ExecutionError("Failed to export volume %s "
                                     "as NFS export" % cls.volname)
            time.sleep(5)
        else:
            g.log.info("Volume %s is exported already"
                       % cls.volname)

        _, _, _ = g.run(cls.mnode, "showmount -e")

        # Log Volume Info and Status
        ret = log_volume_info_and_status(cls.mnode, cls.volname)
        if not ret:
            raise ExecutionError("Logging volume %s info and status failed"
                                 % cls.volname)

        # Create Mounts
        _rc = True
        for mount_obj in cls.mounts:
            ret = mount_obj.mount()
            if not ret:
                g.log.error("Unable to mount volume '%s:%s' on '%s:%s'",
                            mount_obj.server_system, mount_obj.volname,
                            mount_obj.client_system, mount_obj.mountpoint)
                _rc = False
        if not _rc:
            raise ExecutionError("Mounting volume %s on few clients failed"
                                 % cls.volname)

        # Get info of mount before the IO
        log_mounts_info(cls.mounts)

    @classmethod
    def tearDownClass(cls, umount_vol=True, cleanup_vol=True,
                      teardown_nfs_ganesha_cluster=True):
        """Teardown the export, mounts and volume.

        Kwargs:
            umount_vol (bool): unmount the client mounts when True.
            cleanup_vol (bool): unexport and delete the volume when True.
            teardown_nfs_ganesha_cluster (bool): also delete the
                nfs-ganesha cluster when True.

        Raises:
            ExecutionError: if any teardown step fails.
        """

        # Unmount volume
        if umount_vol:
            _rc = True
            for mount_obj in cls.mounts:
                ret = mount_obj.unmount()
                if not ret:
                    g.log.error("Unable to unmount volume '%s:%s' on '%s:%s'",
                                mount_obj.server_system, mount_obj.volname,
                                mount_obj.client_system, mount_obj.mountpoint)
                    _rc = False
            if not _rc:
                raise ExecutionError("Unmount of all mounts are not "
                                     "successful")

        # Cleanup volume
        if cleanup_vol:

            # Unexport volume, if it is not unexported already
            vol_option = get_volume_options(cls.mnode, cls.volname,
                                            option='ganesha.enable')
            if vol_option is None:
                raise ExecutionError("Failed to get ganesha.enable volume "
                                     " option for %s " % cls.volume)
            if vol_option['ganesha.enable'] != 'off':
                if is_volume_exported(cls.mnode, cls.volname, "nfs"):
                    ret, _, _ = unexport_nfs_ganesha_volume(
                        mnode=cls.mnode, volname=cls.volname)
                    if ret != 0:
                        raise ExecutionError("Failed to unexport volume %s "
                                             % cls.volname)
                    time.sleep(5)
            else:
                g.log.info("Volume %s is unexported already"
                           % cls.volname)

            _, _, _ = g.run(cls.mnode, "showmount -e")

            ret = cleanup_volume(mnode=cls.mnode, volname=cls.volname)
            if not ret:
                raise ExecutionError("cleanup volume %s failed"
                                     % cls.volname)

        # All Volume Info
        volume_info(cls.mnode)

        (NfsGaneshaClusterSetupClass.
         tearDownClass.
         im_func(cls,
                 delete_nfs_ganesha_cluster=teardown_nfs_ganesha_cluster))
+
+
class NfsGaneshaIOBaseClass(NfsGaneshaVolumeBaseClass):
    """ Nfs Ganesha IO base class to run the tests when IO is in progress """

    @classmethod
    def setUpClass(cls):
        """Push the IO helper script to every client after base setup.

        Raises:
            ExecutionError: if the script upload fails.
        """
        NfsGaneshaVolumeBaseClass.setUpClass.im_func(cls)

        # Upload io scripts for running IO on mounts
        script_local_path = ("/usr/share/glustolibs/io/scripts/"
                             "file_dir_ops.py")
        cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
                                  "file_dir_ops.py")
        if not upload_scripts(cls.clients, script_local_path):
            raise ExecutionError("Failed to upload IO scripts")

        cls.counter = 1

    def setUp(self):
        """Kick off deep-directory/file creation IO on every mount."""
        NfsGaneshaVolumeBaseClass.setUp.im_func(self)

        # Start IO on mounts
        g.log.info("Starting IO on all mounts...")
        self.all_mounts_procs = []
        for mount in self.mounts:
            io_cmd = ("python %s create_deep_dirs_with_files "
                      "--dirname-start-num %d "
                      "--dir-depth 2 "
                      "--dir-length 15 "
                      "--max-num-of-dirs 5 "
                      "--num-of-files 10 %s" % (self.script_upload_path,
                                                self.counter,
                                                mount.mountpoint))
            self.all_mounts_procs.append(
                g.run_async(mount.client_system, io_cmd, user=mount.user))
            self.counter += 10
        self.io_validation_complete = False

        # Adding a delay of 15 seconds before test method starts. This
        # is to ensure IO's are in progress and giving some time to fill data
        time.sleep(15)

    def tearDown(self):
        """If test method failed before validating IO, tearDown waits for the
        IO's to complete and checks for the IO exit status
        """
        # Wait for IO to complete if io validation is not executed in the
        # test method
        if not self.io_validation_complete:
            g.log.info("Wait for IO to complete as IO validation did not "
                       "succeed in test method")
            if not wait_for_io_to_complete(self.all_mounts_procs,
                                           self.mounts):
                raise ExecutionError("IO failed on some of the clients")
            g.log.info("IO is successful on all mounts")
        NfsGaneshaVolumeBaseClass.tearDown.im_func(self)

    @classmethod
    def tearDownClass(cls, umount_volume=True, cleanup_volume=True,
                      teardown_nfsganesha_cluster=True):
        """Cleanup data from mount, cleanup volume and delete nfs ganesha
        cluster.
        """
        # Log Mounts info
        g.log.info("Log mounts info")
        log_mounts_info(cls.mounts)

        (NfsGaneshaVolumeBaseClass.
         tearDownClass.
         im_func(cls,
                 umount_vol=umount_volume, cleanup_vol=cleanup_volume,
                 teardown_nfs_ganesha_cluster=teardown_nfsganesha_cluster))
+
+
def wait_for_nfs_ganesha_volume_to_get_exported(mnode, volname, timeout=120):
    """Waits for the nfs ganesha volume to get exported.

    Polls the export list every 10 seconds until the volume shows up or
    the timeout elapses.

    Args:
        mnode (str): Node on which command has to be executed.
        volname (str): volume name

    Kwargs:
        timeout (int): timeout value in seconds to wait for volume
            to get exported

    Returns:
        True on success, False otherwise

    Examples:
        >>> wait_for_nfs_ganesha_volume_to_get_exported("abc.com", "testvol")
    """
    elapsed = 0
    while elapsed < timeout:
        if is_volume_exported(mnode, volname, "nfs"):
            return True
        time.sleep(10)
        elapsed += 10

    g.log.error("Failed to export volume %s" % volname)
    return False
+
+
def wait_for_nfs_ganesha_volume_to_get_unexported(mnode, volname,
                                                  timeout=120):
    """Waits for the nfs ganesha volume to get unexported.

    Polls the export list every 10 seconds until the volume disappears
    or the timeout elapses.

    Args:
        mnode (str): Node on which command has to be executed.
        volname (str): volume name

    Kwargs:
        timeout (int): timeout value in seconds to wait for volume
            to get unexported

    Returns:
        True on success, False otherwise

    Examples:
        >>> wait_for_nfs_ganesha_volume_to_get_unexported("abc.com",
        ...                                               "testvol")
    """
    elapsed = 0
    while elapsed < timeout:
        if not is_volume_exported(mnode, volname, "nfs"):
            return True
        time.sleep(10)
        elapsed += 10

    g.log.error("Failed to unexport volume %s" % volname)
    return False
diff --git a/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_ops.py b/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_ops.py
new file mode 100644
index 000000000..f5892990d
--- /dev/null
+++ b/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_ops.py
@@ -0,0 +1,909 @@
+#!/usr/bin/env python
+# Copyright (C) 2016-2017 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+ Description: Library for nfs ganesha operations.
+ Pre-requisite: Please install gdeploy package on the glusto-tests
+ management node.
+"""
+
+from glusto.core import Glusto as g
+import os
+
+GDEPLOY_CONF_DIR = "/usr/share/glustolibs/gdeploy_configs/"
+
+
def create_nfs_ganesha_cluster(servers, vips):
    """Creates nfs ganesha cluster using gdeploy

    Args:
        servers (list): Nodes in which nfs-ganesha cluster will be created.
        vips (list): virtual IPs of each servers mentioned in 'servers'
            param.

    Returns:
        bool : True on successfully creating nfs-ganesha cluster.
            False otherwise

    Example:
        create_nfs_ganesha_cluster(servers, vips)
    """

    conf_file = "create_nfs_ganesha_cluster.jinja"
    gdeploy_config_file = GDEPLOY_CONF_DIR + conf_file
    tmp_gdeploy_config_file = ("/tmp/" + os.path.splitext(conf_file)[0] +
                               ".conf")

    values_to_substitute_in_template = {'servers': servers,
                                        'vips': vips}

    ret = g.render_template(gdeploy_config_file,
                            values_to_substitute_in_template,
                            tmp_gdeploy_config_file)
    if not ret:
        g.log.error("Failed to substitute values in %s file"
                    % tmp_gdeploy_config_file)
        return False

    cmd = "gdeploy -c " + tmp_gdeploy_config_file
    try:
        retcode, stdout, stderr = g.run_local(cmd)
        if retcode != 0:
            g.log.error("Failed to execute gdeploy cmd %s for creating nfs "
                        "ganesha cluster" % cmd)
            g.log.error("gdeploy console output for creating nfs-ganesha "
                        "cluster: %s" % stderr)
            return False

        g.log.info("gdeploy output for creating nfs-ganesha cluster: %s"
                   % stdout)
    finally:
        # Remove the rendered gdeploy config on every exit path (the
        # original leaked it in /tmp when gdeploy failed).
        os.remove(tmp_gdeploy_config_file)

    # pcs status output
    _, _, _ = g.run(servers[0], "pcs status")
    return True
+
+
def teardown_nfs_ganesha_cluster(servers, force=False):
    """Teardown nfs ganesha cluster using gdeploy

    Args:
        servers (list): Nodes in nfs-ganesha cluster to teardown entire
            cluster
        force (bool): if this option is set to True, then nfs ganesha cluster
            is teardown using force cleanup

    Returns:
        bool : True on successfully teardown nfs-ganesha cluster.
            False otherwise

    Example:
        teardown_nfs_ganesha_cluster(servers)
    """

    conf_file = "teardown_nfs_ganesha_cluster.jinja"
    gdeploy_config_file = GDEPLOY_CONF_DIR + conf_file
    tmp_gdeploy_config_file = ("/tmp/" + os.path.splitext(conf_file)[0] +
                               ".conf")

    values_to_substitute_in_template = {'servers': servers}

    ret = g.render_template(gdeploy_config_file,
                            values_to_substitute_in_template,
                            tmp_gdeploy_config_file)
    if not ret:
        g.log.error("Failed to substitute values in %s file"
                    % tmp_gdeploy_config_file)
        return False

    cmd = "gdeploy -c " + tmp_gdeploy_config_file
    try:
        retcode, stdout, stderr = g.run_local(cmd)
        if retcode != 0:
            g.log.error("Failed to execute gdeploy cmd %s for teardown nfs "
                        "ganesha cluster" % cmd)
            g.log.error("gdeploy console output for teardown nfs-ganesha "
                        "cluster: %s" % stderr)
            return False

        g.log.info("gdeploy output for teardown nfs-ganesha cluster: %s"
                   % stdout)
    finally:
        # Remove the rendered gdeploy config on every exit path (the
        # original leaked it in /tmp when gdeploy failed).
        os.remove(tmp_gdeploy_config_file)

    if force:
        g.log.info("Executing force cleanup...")
        # The teardown command is identical on every node; build it once.
        cleanup_cmd = ("/usr/libexec/ganesha/ganesha-ha.sh --teardown "
                       "/var/run/gluster/shared_storage/nfs-ganesha")
        for server in servers:
            _, _, _ = g.run(server, cleanup_cmd)
            _, _, _ = stop_nfs_ganesha_service(server)

    return True
+
+
def add_node_to_nfs_ganesha_cluster(servers, node_to_add, vip):
    """Adds a node to nfs ganesha cluster using gdeploy

    Args:
        servers (list): Nodes of existing nfs-ganesha cluster.
        node_to_add (str): Node to add in existing nfs-ganesha cluster.
        vip (str): virtual IP of the node mentioned in 'node_to_add'
            param.

    Returns:
        bool : True on successfully adding node to nfs-ganesha cluster.
            False otherwise

    Example:
        add_node_to_nfs_ganesha_cluster(servers, node_to_add, vip)
    """

    conf_file = "add_node_to_nfs_ganesha_cluster.jinja"
    gdeploy_config_file = GDEPLOY_CONF_DIR + conf_file
    tmp_gdeploy_config_file = ("/tmp/" + os.path.splitext(conf_file)[0] +
                               ".conf")
    cluster_nodes = servers
    # gdeploy needs the full host list including the newcomer.
    hosts = servers + [node_to_add]

    values_to_substitute_in_template = {'servers': hosts,
                                        'node_to_add': node_to_add,
                                        'cluster_nodes': cluster_nodes,
                                        'vip': vip}

    ret = g.render_template(gdeploy_config_file,
                            values_to_substitute_in_template,
                            tmp_gdeploy_config_file)
    if not ret:
        g.log.error("Failed to substitute values in %s file"
                    % tmp_gdeploy_config_file)
        return False

    cmd = "gdeploy -c " + tmp_gdeploy_config_file
    try:
        retcode, stdout, stderr = g.run_local(cmd)
        if retcode != 0:
            g.log.error("Failed to execute gdeploy cmd %s for adding node "
                        "in existing nfs ganesha cluster" % cmd)
            g.log.error("gdeploy console output for adding node in "
                        "existing nfs-ganesha cluster: %s" % stderr)
            return False

        g.log.info("gdeploy output for adding node in existing "
                   "nfs-ganesha cluster: %s" % stdout)
    finally:
        # Remove the rendered gdeploy config on every exit path (the
        # original leaked it in /tmp when gdeploy failed).
        os.remove(tmp_gdeploy_config_file)

    # pcs status output
    _, _, _ = g.run(servers[0], "pcs status")
    return True
+
+
def delete_node_from_nfs_ganesha_cluster(servers, node_to_delete):
    """Deletes a node from existing nfs ganesha cluster using gdeploy

    Args:
        servers (list): Nodes of existing nfs-ganesha cluster.
        node_to_delete (str): Node to delete from existing nfs-ganesha
            cluster.

    Returns:
        bool : True on successfully deleting the node from the
            nfs-ganesha cluster. False otherwise

    Example:
        delete_node_from_nfs_ganesha_cluster(servers, node_to_delete)
    """

    conf_file = "delete_node_from_nfs_ganesha_cluster.jinja"
    gdeploy_config_file = GDEPLOY_CONF_DIR + conf_file
    tmp_gdeploy_config_file = ("/tmp/" + os.path.splitext(conf_file)[0] +
                               ".conf")

    values_to_substitute_in_template = {'servers': servers,
                                        'node_to_delete': node_to_delete}

    ret = g.render_template(gdeploy_config_file,
                            values_to_substitute_in_template,
                            tmp_gdeploy_config_file)
    if not ret:
        g.log.error("Failed to substitute values in %s file"
                    % tmp_gdeploy_config_file)
        return False

    cmd = "gdeploy -c " + tmp_gdeploy_config_file
    try:
        retcode, stdout, stderr = g.run_local(cmd)
        if retcode != 0:
            g.log.error("Failed to execute gdeploy cmd %s for deleting node "
                        "from existing nfs ganesha cluster" % cmd)
            g.log.error("gdeploy console output for deleting node from "
                        "existing nfs-ganesha cluster: %s" % stderr)
            return False

        g.log.info("gdeploy output for deleting node from existing "
                   "nfs-ganesha cluster: %s" % stdout)
    finally:
        # Remove the rendered gdeploy config on every exit path (the
        # original leaked it in /tmp when gdeploy failed).
        os.remove(tmp_gdeploy_config_file)

    # pcs status output
    _, _, _ = g.run(servers[0], "pcs status")
    return True
+
+
def enable_nfs_ganesha(mnode):
    """Enables nfs-ganesha cluster in the storage pool.
    All the pre-requisites to create nfs-ganesha cluster
    has to be done prior to use this module.

    Args:
        mnode (str): Node on which cmd has to be executed.

    Returns:
        tuple: (ret, out, err) as returned by ``g.run`` -- the command's
        exit status (int), stdout (str) and stderr (str).

    Example:
        enable_nfs_ganesha("abc.com")
    """
    return g.run(mnode, "gluster nfs-ganesha enable --mode=script")
+
+
def disable_nfs_ganesha(mnode):
    """Disables nfs-ganesha cluster in the storage pool.

    Args:
        mnode (str): Node on which cmd has to be executed.

    Returns:
        tuple: (ret, out, err) as returned by ``g.run`` -- the command's
        exit status (int), stdout (str) and stderr (str).

    Example:
        disable_nfs_ganesha("abc.com")
    """
    return g.run(mnode, "gluster nfs-ganesha disable --mode=script")
+
+
def export_nfs_ganesha_volume(mnode, volname):
    """Exports nfs-ganesha volume.

    Args:
        mnode (str): Node on which cmd has to be executed.
        volname (str): Volume name

    Returns:
        tuple: (ret, out, err) as returned by ``g.run`` -- the command's
        exit status (int), stdout (str) and stderr (str).

    Example:
        export_nfs_ganesha_volume("abc.com", volname)
    """
    return g.run(mnode,
                 "gluster volume set %s ganesha.enable on" % volname)
+
+
def unexport_nfs_ganesha_volume(mnode, volname):
    """Unexport nfs-ganesha volume.

    Args:
        mnode (str): Node on which cmd has to be executed.
        volname (str): Volume name

    Returns:
        tuple: (ret, out, err) as returned by ``g.run`` -- the command's
        exit status (int), stdout (str) and stderr (str).

    Example:
        unexport_nfs_ganesha_volume("abc.com", volname)
    """
    return g.run(mnode,
                 "gluster volume set %s ganesha.enable off" % volname)
+
+
def run_refresh_config(mnode, volname):
    """Runs refresh config on nfs ganesha volume.

    Args:
        mnode (str): Node in which refresh config command will
            be executed.
        volname (str): volume name

    Returns:
        bool : True on successfully running refresh config on
            nfs-ganesha volume. False otherwise

    Example:
        run_refresh_config("abc.com", volname)
    """

    conf_file = "nfs_ganesha_refresh_config.jinja"
    gdeploy_config_file = GDEPLOY_CONF_DIR + conf_file

    tmp_gdeploy_config_file = ("/tmp/" + os.path.splitext(conf_file)[0] +
                               ".conf")

    values_to_substitute_in_template = {'server': mnode,
                                        'volname': volname}

    ret = g.render_template(gdeploy_config_file,
                            values_to_substitute_in_template,
                            tmp_gdeploy_config_file)
    if not ret:
        g.log.error("Failed to substitute values in %s file"
                    % tmp_gdeploy_config_file)
        return False

    cmd = "gdeploy -c " + tmp_gdeploy_config_file
    try:
        retcode, stdout, stderr = g.run_local(cmd)
        if retcode != 0:
            g.log.error("Failed to execute gdeploy cmd %s for running "
                        "refresh config on nfs ganesha volume" % cmd)
            g.log.error("gdeploy console output for running refresh config "
                        "on nfs ganesha volume: %s" % stderr)
            return False

        g.log.info("gdeploy output for running refresh config "
                   "on nfs ganesha volume: %s" % stdout)
    finally:
        # Remove the rendered gdeploy config on every exit path (the
        # original leaked it in /tmp when gdeploy failed).
        os.remove(tmp_gdeploy_config_file)

    return True
+
+
def update_volume_export_configuration(mnode, volname, config_to_update):
    """Updates volume export configuration and runs
    refresh config for the volume.

    Args:
        mnode (str): Node in which refresh config command will
            be executed.
        volname (str): volume name
        config_to_update (str): config lines to update in volume
            export configuration file.

    Returns:
        bool : True on successfully updating export config for
            nfs-ganesha volume. False otherwise

    Example:
        update_volume_export_configuration(mnode, volname, config_to_update)
    """

    conf_file = "nfs_ganesha_update_export_file.jinja"
    gdeploy_config_file = GDEPLOY_CONF_DIR + conf_file
    tmp_gdeploy_config_file = ("/tmp/" + os.path.splitext(conf_file)[0] +
                               ".conf")

    values_to_substitute_in_template = {'server': mnode,
                                        'volname': volname,
                                        'config_to_update': config_to_update}

    ret = g.render_template(gdeploy_config_file,
                            values_to_substitute_in_template,
                            tmp_gdeploy_config_file)
    if not ret:
        g.log.error("Failed to substitute values in %s file"
                    % tmp_gdeploy_config_file)
        return False

    cmd = "gdeploy -c " + tmp_gdeploy_config_file
    try:
        retcode, stdout, stderr = g.run_local(cmd)
        if retcode != 0:
            g.log.error("Failed to execute gdeploy cmd %s to update export "
                        "configuration on nfs ganesha volume" % cmd)
            g.log.error("gdeploy console output to update export "
                        "configuration on nfs ganesha volume: %s" % stderr)
            return False

        g.log.info("gdeploy output to update export configuration "
                   "on nfs ganesha volume: %s" % stdout)
    finally:
        # Remove the rendered gdeploy config on every exit path (the
        # original leaked it in /tmp when gdeploy failed).
        os.remove(tmp_gdeploy_config_file)

    return True
+
+
def enable_root_squash(mnode, volname):
    """Enable root squash for the given volume.

    Args:
        mnode (str): Node in which cmd command will
            be executed.
        volname (str): volume name

    Returns:
        bool : True on successfully enabling root squash on
            nfs-ganesha volume. False otherwise

    Example:
        enable_root_squash(mnode, volname)
    """
    return update_volume_export_configuration(
        mnode, volname, 'Squash="Root_squash";')
+
+
def disable_root_squash(mnode, volname):
    """Disable root squash for the given volume.

    Args:
        mnode (str): Node in which cmd command will
            be executed.
        volname (str): volume name

    Returns:
        bool : True on successfully disabling root squash on
            nfs-ganesha volume. False otherwise

    Example:
        disable_root_squash(mnode, volname)
    """
    return update_volume_export_configuration(
        mnode, volname, 'Squash="No_root_squash";')
+
+
def enable_acl(mnode, volname):
    """Enable acl for the given volume.

    Args:
        mnode (str): Node in which cmd command will
            be executed.
        volname (str): volume name

    Returns:
        bool : True on successfully enabling acl on
            nfs-ganesha volume. False otherwise

    Example:
        enable_acl(mnode, volname)
    """
    return update_volume_export_configuration(
        mnode, volname, "Disable_ACL = false;")
+
+
def disable_acl(mnode, volname):
    """Disable acl for the given volume.

    Args:
        mnode (str): Node in which cmd command will
            be executed.
        volname (str): volume name

    Returns:
        bool : True on successfully disabling acl on
            nfs-ganesha volume. False otherwise

    Example:
        disable_acl(mnode, volname)
    """
    return update_volume_export_configuration(
        mnode, volname, "Disable_ACL = true;")
+
+
def is_nfs_ganesha_cluster_in_healthy_state(mnode):
    """Checks whether nfs ganesha cluster is in healthy state.

    Args:
        mnode (str): Node in which cmd command will
            be executed.

    Returns:
        bool : True if nfs ganesha cluster is in healthy state.
            False otherwise

    Example:
        is_nfs_ganesha_cluster_in_healthy_state(mnode)
    """
    status_cmd = ("/usr/libexec/ganesha/ganesha-ha.sh --status "
                  "/run/gluster/shared_storage/nfs-ganesha/ | grep "
                  " 'Cluster HA Status' | cut -d ' ' -f 4 ")

    retcode, stdout, _ = g.run(mnode, status_cmd)
    if retcode != 0:
        g.log.error("Failed to execute nfs-ganesha status command to check "
                    "if cluster is in healthy state")
        return False

    if stdout.strip('\n') != "HEALTHY":
        g.log.error("nfs-ganesha cluster is not in healthy state. Current "
                    "cluster state: %s " % stdout)
        return False

    resource_cmd = ("/usr/libexec/ganesha/ganesha-ha.sh --status "
                    "/run/gluster/shared_storage/nfs-ganesha/ | grep -v"
                    " 'Online' | grep -v 'Cluster' | cut -d ' ' -f 1 | "
                    "sed s/'-cluster_ip-1'//g")

    retcode, stdout, _ = g.run(mnode, resource_cmd)
    if retcode != 0:
        g.log.error("Failed to execute nfs-ganesha status command to parse "
                    "for the cluster resources")
        return False

    cluster_list = [line for line in stdout.split("\n") if line]

    # NOTE(review): this second command is byte-identical to the one
    # above, so host_list comes from the same pipeline and the comparison
    # below is effectively just a non-empty check -- confirm whether a
    # different field (the hostname column) was intended here.
    retcode, stdout, _ = g.run(mnode, resource_cmd)
    if retcode != 0:
        g.log.error("Failed to execute nfs-ganesha status command to parse "
                    "for the hostnames in cluster")
        return False

    host_list = [line for line in stdout.split("\n") if line]

    if cluster_list and cluster_list == host_list:
        g.log.info("nfs ganesha cluster is in HEALTHY state")
        return True

    g.log.error("nfs ganesha cluster is not in HEALTHY state")
    return False
+
+
def is_nfs_ganesha_cluster_in_failover_state(mnode, failed_nodes):
    """
    Checks whether nfs ganesha cluster is in failover state.

    Verifies that ganesha-ha.sh reports HA status "FAILOVER" and that
    every node listed in `failed_nodes` has its cluster resource taken
    over by a different node, while all other resources stay on their
    own node.

    Args:
        mnode (str): Node in which cmd command will
            be executed.
        failed_nodes (list): Nodes in which nfs-ganesha process
            are down.

    Returns:
        bool : True if nfs ganesha cluster is in failover state.
            False otherwise

    Example:
        is_nfs_ganesha_cluster_in_failover_state(mnode, failed_nodes)
    """

    # Overall HA status is field 4 of the 'Cluster HA Status' line.
    cmd = ("/usr/libexec/ganesha/ganesha-ha.sh --status " +
           "/run/gluster/shared_storage/nfs-ganesha/ | grep " +
           " 'Cluster HA Status' | cut -d ' ' -f 4 ")

    retcode, stdout, stderr = g.run(mnode, cmd)
    if retcode != 0:
        g.log.error("Failed to execute nfs-ganesha status command to check "
                    "if cluster is in failover state")
        return False

    if stdout.strip('\n') != "FAILOVER":
        g.log.error("nfs-ganesha cluster is not in failover state. Current "
                    "cluster state: %s " % stdout)
        return False

    # Field 1: cluster resource names; stripping '-cluster_ip-1' yields
    # the node each resource belongs to.
    cmd = ("/usr/libexec/ganesha/ganesha-ha.sh --status " +
           "/run/gluster/shared_storage/nfs-ganesha/ | grep -v" +
           " 'Online' | grep -v 'Cluster' | cut -d ' ' -f 1 | " +
           "sed s/'-cluster_ip-1'//g")

    retcode, stdout, stderr = g.run(mnode, cmd)
    if retcode != 0:
        g.log.error("Failed to execute nfs-ganesha status command to parse "
                    "for the cluster resources")
        return False

    cluster_list = stdout.split("\n")
    cluster_list = list(filter(None, cluster_list))

    # Field 2: the hostname currently hosting each resource.
    cmd = ("/usr/libexec/ganesha/ganesha-ha.sh --status " +
           "/run/gluster/shared_storage/nfs-ganesha/ | grep -v" +
           " 'Online' | grep -v 'Cluster' | cut -d ' ' -f 2 | " +
           "sed s/'-cluster_ip-1'//g")

    retcode, stdout, stderr = g.run(mnode, cmd)
    if retcode != 0:
        g.log.error("Failed to execute nfs-ganesha status command to parse "
                    "for the hostnames in cluster")
        return False

    host_list = stdout.split("\n")
    host_list = list(filter(None, host_list))

    # Pairwise comparison: both command outputs come from the same status
    # listing, so the lists are assumed to be index-aligned.
    ret = True
    for cluster_node, host_node in zip(cluster_list, host_list):
        if cluster_node in failed_nodes:
            # A failed node's resource must be hosted elsewhere.
            if cluster_node == host_node:
                g.log.error("failover status: failed node %s is not takenover "
                            "by other node in nfs-ganesha cluster"
                            % (cluster_node))
                ret = False
            else:
                g.log.info("failover status: failed node %s is successfully "
                           "failovered to node %s" % (cluster_node, host_node))
        else:
            # A healthy node's resource must still be on its own node.
            if cluster_node != host_node:
                g.log.error("Unexpected. Other nodes are in failover state. "
                            "Node %s is takenover by node %s in nfs-ganesha "
                            "cluster" % (cluster_node, host_node))
                ret = False
    return ret
+
+
def is_nfs_ganesha_cluster_in_bad_state(mnode):
    """
    Checks whether nfs ganesha cluster is in bad state.

    Args:
        mnode (str): Node in which cmd command will
            be executed.

    Returns:
        bool : True if nfs ganesha cluster is in bad state.
            False otherwise

    Example:
        is_nfs_ganesha_cluster_in_bad_state(mnode)
    """

    # HA status is field 4 of the 'Cluster HA Status' line.
    cmd = ("/usr/libexec/ganesha/ganesha-ha.sh --status " +
           "/run/gluster/shared_storage/nfs-ganesha/ | grep " +
           " 'Cluster HA Status' | cut -d ' ' -f 4 ")

    retcode, stdout, stderr = g.run(mnode, cmd)
    if retcode != 0:
        g.log.error("Failed to execute nfs-ganesha status command to check "
                    "if cluster is in bad state")
        return False

    if stdout.strip('\n') != "BAD":
        g.log.error("nfs-ganesha cluster is not in bad state. Current "
                    "cluster state: %s " % stdout)
        return False

    # BUGFIX: the function previously fell off the end here and returned
    # None even when the cluster state was "BAD"; return True explicitly.
    g.log.info("nfs-ganesha cluster is in BAD state")
    return True
+
+
def is_nfs_ganesha_cluster_exists(mnode):
    """Check whether an nfs-ganesha cluster exists.

    A cluster is considered to exist when the ganesha-ha.sh status output
    lists at least one cluster resource.

    Args:
        mnode (str): Node on which the status command is executed.

    Returns:
        bool: True when an nfs-ganesha cluster exists, False otherwise.

    Example:
        is_nfs_ganesha_cluster_exists(mnode)
    """
    # Field 1 of the status listing holds the cluster resource names.
    status_cmd = ("/usr/libexec/ganesha/ganesha-ha.sh --status " +
                  "/run/gluster/shared_storage/nfs-ganesha/ | grep -v" +
                  " 'Online' | grep -v 'Cluster' | cut -d ' ' -f 1 | " +
                  "sed s/'-cluster_ip-1'//g")

    retcode, stdout, stderr = g.run(mnode, status_cmd)
    if retcode != 0:
        g.log.error("Failed to execute nfs-ganesha status command to parse "
                    "for the cluster resources")
        return False

    resources = [line for line in stdout.split("\n") if line]

    if not resources:
        g.log.error("nfs ganesha cluster not exists")
        return False

    g.log.info("nfs ganesha cluster exists")
    return True
+
+
def set_nfs_ganesha_client_configuration(client_nodes):
    """Sets pre-requisites in the client machines to
    mount with nfs-ganesha.

    Renders the gdeploy jinja template for client configuration into a
    temporary conf file under /tmp, runs gdeploy against it, and removes
    the temporary file afterwards.

    Args:
        client_nodes (list): Client nodes in which the prerequisite
            are done to do nfs-ganesha mount.

    Returns:
        bool : True on successfully setting the nfs-ganesha client
            configuration. False otherwise

    Example:
        set_nfs_ganesha_client_configuration(client_nodes)
    """

    conf_file = "nfs_ganesha_client_configuration.jinja"
    gdeploy_config_file = GDEPLOY_CONF_DIR + conf_file
    tmp_gdeploy_config_file = ("/tmp/" + os.path.splitext(conf_file)[0] +
                               ".conf")

    values_to_substitute_in_template = {'servers': client_nodes}

    ret = g.render_template(gdeploy_config_file,
                            values_to_substitute_in_template,
                            tmp_gdeploy_config_file)
    if not ret:
        g.log.error("Failed to substitute values in %s file"
                    % tmp_gdeploy_config_file)
        return False

    cmd = "gdeploy -c " + tmp_gdeploy_config_file
    try:
        retcode, stdout, stderr = g.run_local(cmd)
        if retcode != 0:
            g.log.error("Failed to execute gdeploy cmd %s for setting nfs "
                        "ganesha client configuration" % cmd)
            g.log.error("gdeploy console output for setting nfs-ganesha "
                        "client configuration: %s" % stderr)
            return False

        g.log.info("gdeploy output for setting nfs-ganesha client "
                   "configuration: %s" % stdout)
        return True
    finally:
        # BUGFIX: remove the rendered gdeploy conf file from /tmp on both
        # success and failure; previously it leaked when gdeploy failed.
        os.remove(tmp_gdeploy_config_file)
+
+
def stop_nfs_ganesha_service(mnode):
    """Stop the nfs-ganesha systemd service on the given node.

    Args:
        mnode (str): Node on which the command is executed.

    Returns:
        tuple: (ret, out, err) of the command execution, where 'ret' is
            the int return value, 'out' the str stdout and 'err' the str
            stderr of the command.

    Example:
        stop_nfs_ganesha_service(mnode)
    """
    return g.run(mnode, "systemctl stop nfs-ganesha")
+
+
def start_nfs_ganesha_service(mnode):
    """Start the nfs-ganesha systemd service on the given node.

    Args:
        mnode (str): Node on which the command is executed.

    Returns:
        tuple: (ret, out, err) of the command execution, where 'ret' is
            the int return value, 'out' the str stdout and 'err' the str
            stderr of the command.

    Example:
        start_nfs_ganesha_service(mnode)
    """
    return g.run(mnode, "systemctl start nfs-ganesha")
+
+
def kill_nfs_ganesha_service(mnode):
    """Forcefully kill the ganesha.nfsd process on the given node.

    Sends SIGKILL to the pid found by pgrep, simulating an abrupt
    nfs-ganesha failure (as opposed to a clean systemctl stop).

    Args:
        mnode (str): Node on which the command is executed.

    Returns:
        tuple: (ret, out, err) of the command execution, where 'ret' is
            the int return value, 'out' the str stdout and 'err' the str
            stderr of the command.

    Example:
        kill_nfs_ganesha_service(mnode)
    """
    return g.run(mnode, "kill -9 $(pgrep ganesha.nfsd)")
+
+
def start_pacemaker_service(mnode):
    """Start the pacemaker systemd service on the given node.

    Args:
        mnode (str): Node on which the command is executed.

    Returns:
        tuple: (ret, out, err) of the command execution, where 'ret' is
            the int return value, 'out' the str stdout and 'err' the str
            stderr of the command.

    Example:
        start_pacemaker_service(mnode)
    """
    return g.run(mnode, "systemctl start pacemaker")