-rw-r--r--  glustolibs-gluster/glustolibs/gluster/lib_utils.py                          52
-rw-r--r--  glustolibs-io/glustolibs/io/utils.py                                        204
-rw-r--r--  tests/functional/nfs_ganesha/test_nfs_ganesha_run_io_multiple_clients.py     93
3 files changed, 349 insertions, 0 deletions
diff --git a/glustolibs-gluster/glustolibs/gluster/lib_utils.py b/glustolibs-gluster/glustolibs/gluster/lib_utils.py
index d1329d688..1b3822d11 100644
--- a/glustolibs-gluster/glustolibs/gluster/lib_utils.py
+++ b/glustolibs-gluster/glustolibs/gluster/lib_utils.py
@@ -659,3 +659,55 @@ def check_if_dir_is_filled(mnode, dirname, percent_to_fill,
" percentage")
return True
return False
+
+
+def install_epel(servers):
+    """
+    Module to install epel in rhel/centos/fedora systems.
+
+    Args:
+        servers (list): servers on which epel has to be installed.
+
+    Returns:
+        bool: True if epel is installed successfully, False otherwise.
+
+    Example:
+        install_epel(["abc.com", "def.com"])
+    """
+
+    rt = True
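+    # Check all servers in parallel for an already installed epel-release
+    # package; install it only on the servers where it is missing.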
+    results = g.run_parallel(servers, "yum list installed epel-release")
+    for server in servers:
+        if results[server][0] != 0:
+            ret, out, _ = g.run(server,
+                                "cat /etc/redhat-release")
+            if ret != 0:
+                g.log.error("Failed to recognize OS release on %s" % server)
+                rt = False
+            release_string = out
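+            # Pick the epel-release rpm matching the detected major
+            # release (RHEL/CentOS 5, 6, 7 or Fedora).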
+ if "release 5" in release_string:
+ ret, _, _ = g.run(server,
+ "yum -y install http://dl.fedoraproject.org/"
+ "pub/epel/epel-release-latest-5.noarch.rpm")
+ if ret != 0:
+ g.log.error("Epel install failed")
+ rt = False
+ elif "release 6" in release_string:
+ ret, _, _ = g.run(server,
+ "yum -y install http://dl.fedoraproject.org/"
+ "pub/epel/epel-release-latest-6.noarch.rpm")
+ if ret != 0:
+ g.log.error("Epel install failed")
+ rt = False
+ elif (("release 7" in release_string) or
+ ("Fedora" in release_string)):
+ ret, _, _ = g.run(server,
+ "yum -y install http://dl.fedoraproject.org/"
+ "pub/epel/epel-release-latest-7.noarch.rpm")
+ if ret != 0:
+ g.log.error("Epel install failed")
+ rt = False
+ else:
+ g.log.error("Unrecognized release. Skipping epel install")
+ rt = False
+ return rt
diff --git a/glustolibs-io/glustolibs/io/utils.py b/glustolibs-io/glustolibs/io/utils.py
index 9b8d338a7..e7e2fd4e2 100644
--- a/glustolibs-io/glustolibs/io/utils.py
+++ b/glustolibs-io/glustolibs/io/utils.py
@@ -21,6 +21,7 @@ import os
import subprocess
from glusto.core import Glusto as g
from glustolibs.gluster.mount_ops import GlusterMount
+from multiprocessing import Pool
def collect_mounts_arequal(mounts):
@@ -375,3 +376,206 @@ def cleanup_mounts(mounts):
        list_all_files_and_dirs_mounts(mounts)
    return _rc_lookup
+
+
+def run_bonnie(servers, directory_to_run, username="root"):
+    """
+    Module to run bonnie test suite on the given servers.
+
+    Args:
+        servers (list): servers on which the tests are to be run.
+        directory_to_run (list): directory path where the tests will run,
+            one entry per server.
+
+    Kwargs:
+        username (str): username. Defaults to root.
+
+    Returns:
+        bool: True if the test passes on all servers, False otherwise.
+
+    Example:
+        run_bonnie(["abc.com", "def.com"], ["/mnt/test1", "/mnt/test2"])
+    """
+
+    g.log.info("Running bonnie tests on %s" % ','.join(servers))
+    rt = True
+    options_for_each_servers = []
+
+    # Install bonnie test suite if not installed
+    results = g.run_parallel(servers, "yum list installed bonnie++")
+    for index, server in enumerate(servers):
+        if results[server][0] != 0:
+            ret, out, _ = g.run(server,
+                                "yum list installed bonnie++ || "
+                                "yum -y install bonnie++")
+            if ret != 0:
+                g.log.error("Failed to install bonnie on %s" % server)
+                return False
+
+        # Building options for bonnie tests
+        options_list = []
+        options = ""
+        freemem_command = "free -g | grep Mem: | awk '{ print $2 }'"
+        ret, out, _ = g.run(server, freemem_command)
+        memory = int(out)
+        g.log.info("Memory = %i", memory)
+        options_list.append("-d %s -u %s" % (directory_to_run[index],
+                                             username))
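+        # With 8 GiB of RAM or more, add a heavier profile: a 16G dataset
+        # (-r/-s), no file-creation phase (-n 0), a run label (-m TEST),
+        # fast mode (-f) and no write buffering (-b).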
+        if memory >= 8:
+            options_list.append("-r 16G -s 16G -n 0 -m TEST -f -b")
+
+        options = " ".join(options_list)
+        options_for_each_servers.append(options)
+
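+    # Start bonnie++ asynchronously on all servers, then wait for each run
+    # and flag any non-zero exit status.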
+    proc_list = []
+    for index, server in enumerate(servers):
+        bonnie_command = "bonnie++ %s" % (options_for_each_servers[index])
+        proc = g.run_async(server, bonnie_command)
+        proc_list.append(proc)
+
+    for index, proc in enumerate(proc_list):
+        results = proc.async_communicate()
+        if results[0] != 0:
+            g.log.error("Bonnie test failed on server %s" % servers[index])
+            rt = False
+
+    for index, server in enumerate(servers):
+        ret, out, _ = g.run(server, "rm -rf %s/Bonnie.*"
+                            % directory_to_run[index])
+        if ret != 0:
+            g.log.error("Failed to remove files from %s" % server)
+            rt = False
+
+    for server in servers:
+        ret, out, _ = g.run(server, "yum -y remove bonnie++")
+        if ret != 0:
+            g.log.error("Failed to remove bonnie from %s" % server)
+            return False
+    return rt
+
+
+def run_fio(servers, directory_to_run):
+    """
+    Module to run fio test suite on the given servers.
+
+    Args:
+        servers (list): servers on which the tests are to be run.
+        directory_to_run (list): directory path where the tests will run,
+            one entry per server.
+
+    Returns:
+        bool: True if the test passes on all servers, False otherwise.
+
+    Example:
+        run_fio(["abc.com", "def.com"], ["/mnt/test1", "/mnt/test2"])
+    """
+
+    g.log.info("Running fio tests on %s" % ','.join(servers))
+    rt = True
+
+    # Installing fio if not installed
+    results = g.run_parallel(servers, "yum list installed fio")
+    for index, server in enumerate(servers):
+        if results[server][0] != 0:
+            ret, out, _ = g.run(server,
+                                "yum list installed fio || "
+                                "yum -y install fio")
+            if ret != 0:
+                g.log.error("Failed to install fio on %s" % server)
+                return False
+
+        # building job file for running fio
+        # TODO: parametrizing the fio and to get input values from user
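+        # The generated job file describes a mixed random read/write load:
+        # 20% reads (rwmixread=20), 64k blocks, the sync ioengine and
+        # 8 jobs writing fio_file.* files under the given directory.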
+        job_file = "/tmp/fio_job.ini"
+        cmd = ("echo -e '[global]\nrw=randrw\nio_size=1g\nfsync_on_close=1\n"
+               "size=4g\nbs=64k\nrwmixread=20\nopenfiles=1\nstartdelay=0\n"
+               "ioengine=sync\n[write]\ndirectory=%s\nnrfiles=1\n"
+               "filename_format=fio_file.$jobnum.$filenum\nnumjobs=8' "
+               "> %s" % (directory_to_run[index], job_file))
+
+        ret, _, _ = g.run(server, cmd)
+        if ret != 0:
+            g.log.error("Failed to create fio job file")
+            rt = False
+
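+    # Run fio with the generated job file on all servers asynchronously and
+    # collect the exit status of every run.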
+    proc_list = []
+    for index, server in enumerate(servers):
+        fio_command = "fio %s" % (job_file)
+        proc = g.run_async(server, fio_command)
+        proc_list.append(proc)
+
+    for index, proc in enumerate(proc_list):
+        results = proc.async_communicate()
+        if results[0] != 0:
+            g.log.error("fio test failed on server %s" % servers[index])
+            rt = False
+
+    for index, server in enumerate(servers):
+        ret, out, _ = g.run(server, "rm -rf %s/fio_file.*"
+                            % directory_to_run[index])
+        if ret != 0:
+            g.log.error("Failed to remove files from %s" % server)
+            rt = False
+
+    for index, server in enumerate(servers):
+        ret, out, _ = g.run(server, "rm -rf %s" % job_file)
+        if ret != 0:
+            g.log.error("Failed to remove job file from %s" % server)
+            rt = False
+
+    for server in servers:
+        ret, out, _ = g.run(server, "yum -y remove fio")
+        if ret != 0:
+            g.log.error("Failed to remove fio from %s" % server)
+            return False
+    return rt
+
+
+def run_mixed_io(servers, io_tools, directory_to_run):
+    """
+    Module to run a different io pattern on each of the given servers.
+
+    Args:
+        servers (list): servers on which the tests are to be run.
+        io_tools (list): io tools to choose from. Currently fio and
+            bonnie are supported.
+        directory_to_run (list): directory path where the tests will run,
+            one entry per server.
+
+    Returns:
+        bool: True if the tests pass on all servers, False otherwise.
+
+    Example:
+        run_mixed_io(["abc.com", "def.com"], ["fio", "bonnie"],
+                     ["/mnt/test1", "/mnt/test2"])
+    """
+
+    g.log.info("Running mixed IO tests on %s" % ','.join(servers))
+
+    # Assigning IO tool to each server in round robin way
+    if len(servers) > len(io_tools):
+        for index, tool in enumerate(io_tools):
+            io_tools.append(io_tools[index])
+            if len(servers) == len(io_tools):
+                break
+    server_io_dict = {}
+    for items in zip(servers, io_tools):
+        server_io_dict[items[0]] = items[1]
+
+    io_dict = {'fio': run_fio,
+               'bonnie': run_bonnie}
+
+    func_list = []
+    for index, server in enumerate(servers):
+        tmp_list = ([server], [directory_to_run[index]])
+        tmp_list_item = (io_dict[server_io_dict[server]], tmp_list)
+        func_list.append(tmp_list_item)
+
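+    # Dispatch each runner (run_fio/run_bonnie) for its single server in a
+    # separate worker process so the IO patterns run concurrently, then AND
+    # the per-server results together.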
+    pool = Pool()
+    results = []
+    ret = True
+    for func, func_args in func_list:
+        results.append(pool.apply_async(func, func_args))
+    for result in results:
+        ret = ret & result.get()
+    pool.terminate()
+    return ret
diff --git a/tests/functional/nfs_ganesha/test_nfs_ganesha_run_io_multiple_clients.py b/tests/functional/nfs_ganesha/test_nfs_ganesha_run_io_multiple_clients.py
new file mode 100644
index 000000000..4a414c09f
--- /dev/null
+++ b/tests/functional/nfs_ganesha/test_nfs_ganesha_run_io_multiple_clients.py
@@ -0,0 +1,93 @@
+# Copyright (C) 2016-2017 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+""" Description:
+ Test Cases in this module tests the nfs ganesha feature
+ while running different IO patterns.
+"""
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.nfs_ganesha_libs import NfsGaneshaVolumeBaseClass
+from glustolibs.gluster.lib_utils import install_epel
+from glustolibs.io.utils import run_bonnie, run_fio, run_mixed_io
+
+
+@runs_on([['replicated', 'distributed', 'distributed-replicated',
+           'dispersed', 'distributed-dispersed'],
+          ['nfs']])
+class TestNfsGaneshaWithDifferentIOPatterns(NfsGaneshaVolumeBaseClass):
+    """
+    Tests Nfs Ganesha stability by running different IO Patterns
+    """
+
+    @classmethod
+    def setUpClass(cls):
+        NfsGaneshaVolumeBaseClass.setUpClass.im_func(cls)
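+        # bonnie++ (and on some distributions fio) is shipped only in the
+        # EPEL repository, so make sure it is available on every client.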
+        if not install_epel(cls.clients):
+            raise ExecutionError("Failed to install epel")
+
+    def test_run_bonnie_from_multiple_clients(self):
+
+        directory_to_run = []
+        for mount in self.mounts:
+            directory_to_run.append(mount.mountpoint)
+
+        # Running Bonnie tests from multiple clients
+        ret = run_bonnie(self.clients, directory_to_run)
+        self.assertTrue(ret, ("Bonnie test failed while running tests on %s"
+                              % self.clients))
+
+        # pcs status output
+        _, _, _ = g.run(self.servers[0], "pcs status")
+
+    def test_run_fio_from_multiple_clients(self):
+
+        directory_to_run = []
+        for mount in self.mounts:
+            directory_to_run.append(mount.mountpoint)
+
+        # Running fio tests from multiple clients
+        ret = run_fio(self.clients, directory_to_run)
+        self.assertTrue(ret, ("fio test failed while running tests on %s"
+                              % self.clients))
+
+        # pcs status output
+        _, _, _ = g.run(self.servers[0], "pcs status")
+
+    def test_run_mixed_io_from_multiple_clients(self):
+
+        directory_to_run = []
+        for mount in self.mounts:
+            directory_to_run.append(mount.mountpoint)
+
+        # Running mixed IOs from multiple clients
+        # TODO: parametrizing io_tools and get the inputs from user.
+        io_tools = ['bonnie', 'fio']
+        ret = run_mixed_io(self.clients, io_tools, directory_to_run)
+        self.assertTrue(ret, ("Mixed IO test failed while running tests on "
+                              "%s" % self.clients))
+
+        # pcs status output
+        _, _, _ = g.run(self.servers[0], "pcs status")
+
+    @classmethod
+    def tearDownClass(cls):
+        (NfsGaneshaVolumeBaseClass.
+         tearDownClass.
+         im_func(cls,
+                 teardown_nfs_ganesha_cluster=False))