summaryrefslogtreecommitdiffstats
path: root/tests/functional/nfs_ganesha/test_nfs_ganesha_sanity.py
diff options
context:
space:
mode:
Diffstat (limited to 'tests/functional/nfs_ganesha/test_nfs_ganesha_sanity.py')
-rwxr-xr-xtests/functional/nfs_ganesha/test_nfs_ganesha_sanity.py224
1 files changed, 175 insertions, 49 deletions
diff --git a/tests/functional/nfs_ganesha/test_nfs_ganesha_sanity.py b/tests/functional/nfs_ganesha/test_nfs_ganesha_sanity.py
index 1e0956508..18feef31b 100755
--- a/tests/functional/nfs_ganesha/test_nfs_ganesha_sanity.py
+++ b/tests/functional/nfs_ganesha/test_nfs_ganesha_sanity.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -18,53 +18,108 @@
Test Cases in this module test NFS-Ganesha Sanity.
"""
from glusto.core import Glusto as g
-from glustolibs.gluster.gluster_base_class import runs_on
-from glustolibs.gluster.nfs_ganesha_libs import (
- NfsGaneshaVolumeBaseClass,
- NfsGaneshaIOBaseClass)
+
+from glustolibs.gluster.gluster_base_class import runs_on, GlusterBaseClass
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.misc.misc_libs import (
+ upload_scripts,
+ git_clone_and_compile)
+from glustolibs.gluster.nfs_ganesha_ops import (
+ is_nfs_ganesha_cluster_in_healthy_state,
+ set_acl)
+from glustolibs.io.utils import validate_io_procs
@runs_on([['replicated', 'distributed', 'distributed-replicated',
'dispersed', 'distributed-dispersed'],
['nfs']])
-class TestNfsGaneshaSanity(NfsGaneshaVolumeBaseClass):
+class TestNfsGaneshaSanity(GlusterBaseClass):
"""
Tests to verify NFS Ganesha Sanity.
"""
-
@classmethod
def setUpClass(cls):
- NfsGaneshaVolumeBaseClass.setUpClass.im_func(cls)
+ """
+ Setup nfs-ganesha if not exists.
+ Upload IO scripts to clients
+ """
+ cls.get_super_method(cls, 'setUpClass')()
+
+ # Upload IO scripts for running IO on mounts
+ g.log.info("Upload io scripts to clients %s for running IO on "
+ "mounts", cls.clients)
+ cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
+ "file_dir_ops.py")
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
+ if not ret:
+ raise ExecutionError("Failed to upload IO scripts to clients %s" %
+ cls.clients)
+ g.log.info("Successfully uploaded IO scripts to clients %s",
+ cls.clients)
+
+ # Cloning posix test suite
+ cls.dir_name = "repo_dir"
+ link = "https://github.com/ffilz/ntfs-3g-pjd-fstest.git"
+ ret = git_clone_and_compile(cls.clients, link, cls.dir_name,
+ compile_option=False)
+ if not ret:
+ raise ExecutionError("Failed to clone test repo")
+ g.log.info("Successfully cloned test repo on client")
+        cmd = "cd /root/repo_dir; sed -i 's/ext3/glusterfs/g' tests/conf; make"
+ for client in cls.clients:
+ ret, _, _ = g.run(client, cmd)
+ if ret == 0:
+                g.log.info("Test repo successfully compiled on "
+ "client %s" % client)
+ else:
+ raise ExecutionError("Failed to compile test repo")
+
+ def setUp(self):
+ """
+ Setup and mount volume
+ """
+ g.log.info("Starting to setup and mount volume %s", self.volname)
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to setup and mount volume %s"
+ % self.volname)
+ g.log.info("Successful in setup and mount volume %s", self.volname)
def test_nfs_ganesha_HA_Basic_IO(self):
"""
Tests to create an HA cluster and run basic IO
"""
- # Starting IO on the mounts.Let's do iozone first.
+ # Starting IO on the mounts
+ all_mounts_procs = []
+ count = 1
for mount_obj in self.mounts:
- # Make sure you run relevant setup playbooks,view README !
- g.log.info("Running iozone on %s", mount_obj.client_system)
- cmd = ("cd %s ;iozone -a" % (mount_obj.mountpoint))
- ret, out, err = g.run(mount_obj.client_system, cmd)
- if ret == 0:
- g.log.info(" Iozone run successful")
- else:
- g.log.error("ERROR! Drastic Iozone error encountered !")
- self.assertEqual(ret, 0, "Iozone run failed!")
-
- # Check for crashes after iozone run
- g.log.info("Checking for Cluster Status after iozone run")
- ret, out, err = g.run(self.servers[0],
- "/usr/libexec/ganesha/ganesha-ha.sh --status"
- " /var/run/gluster/shared_storage/nfs-ganesha")
+ g.log.info("Starting IO on %s:%s", mount_obj.client_system,
+ mount_obj.mountpoint)
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
+ "--dirname-start-num %d "
+ "--dir-depth 2 "
+ "--dir-length 10 "
+ "--max-num-of-dirs 5 "
+ "--num-of-files 5 %s" % (
+ self.script_upload_path, count,
+ mount_obj.mountpoint))
+ proc = g.run_async(mount_obj.client_system, cmd,
+ user=mount_obj.user)
+ all_mounts_procs.append(proc)
+ count = count + 10
- if "HEALTHY" in out:
- g.log.info("Cluster is HEALTHY,Continuing..")
+ # Validate IO
+ g.log.info("Validating IO's")
+ ret = validate_io_procs(all_mounts_procs, self.mounts)
+ self.assertTrue(ret, "IO failed on some of the clients")
+ g.log.info("Successfully validated all IO")
- else:
- g.log.error("ERROR! Cluster unhealthy,check for cores!")
- self.assertEqual(ret, 0, "Iozone run failed! Cluster Unhealthy")
+ # Check nfs-ganesha status
+ g.log.info("Checking for Cluster Status after IO run")
+ ret = is_nfs_ganesha_cluster_in_healthy_state(self.mnode)
+ self.assertTrue(ret, "Nfs Ganesha cluster is not healthy after "
+ "running IO")
# Running kernel untar now,single loop for the sanity test
g.log.info("Running kernel untars now")
@@ -73,29 +128,100 @@ class TestNfsGaneshaSanity(NfsGaneshaVolumeBaseClass):
"wget https://www.kernel.org/pub/linux/kernel/v2.6"
"/linux-2.6.1.tar.gz;"
"tar xvf linux-2.6.1.tar.gz" % mount_obj.mountpoint)
- ret, out, err = g.run(mount_obj.client_system, cmd)
- if ret == 0:
- g.log.info("Successfully untared the tarball!")
- else:
- g.log.error("ERROR ! Kernel untar errored out!")
- self.assertEqual(ret, 0, "Kernel untar failed!")
+ ret, _, _ = g.run(mount_obj.client_system, cmd)
+ self.assertEqual(ret, 0, "Kernel untar failed!")
+ g.log.info("Kernel untar successful on %s"
+ % mount_obj.client_system)
- # Check for crashes after kernel untar
+ # Check nfs-ganesha status
g.log.info("Checking for Cluster Status after kernel untar")
- ret, out, err = g.run(self.servers[0],
- "/usr/libexec/ganesha/ganesha-ha.sh --status"
- " /var/run/gluster/shared_storage/nfs-ganesha")
+ ret = is_nfs_ganesha_cluster_in_healthy_state(self.mnode)
+ self.assertTrue(ret, "Nfs Ganesha cluster is not healthy after "
+ "kernel untar")
+
+ def test_nfs_ganesha_posix_compliance(self):
+ """
+ Run Posix Compliance Suite with ACL enabled/disabled.
+ """
+ # Run test with ACL enabled
+
+ # Enable ACL.
+ ret = set_acl(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "Failed to enable ACL")
+ g.log.info("ACL successfully enabled")
+ # Run test
+ for mount_object in self.mounts:
+ g.log.info("Running test now")
+ cmd = ("cd %s ; prove -r /root/%s"
+ % (mount_object.mountpoint, self.dir_name))
+ ret, _, _ = g.run(mount_object.client_system, cmd)
+            # Not asserting here, so as to continue with ACL disabled.
+ if ret != 0:
+ g.log.error("Posix Compliance Suite failed")
+ g.log.info("Continuing with ACL disabled")
+
+ # Check ganesha cluster status
+ g.log.info("Checking ganesha cluster status")
+ self.assertTrue(is_nfs_ganesha_cluster_in_healthy_state(self.mnode),
+ "Cluster is not healthy after test")
+ g.log.info("Ganesha cluster is healthy after the test with ACL "
+ "enabled")
+
+ # Now run test with ACL disabled
+
+ # Disable ACL
+ ret = set_acl(self.mnode, self.volname, acl=False,
+ do_refresh_config=True)
+ self.assertEqual(ret, 0, "Failed to disable ACL")
+ g.log.info("ACL successfully disabled")
- if "HEALTHY" in out:
- g.log.info("Cluster is HEALTHY,Continuing..")
+ # Run test
+ for mount_object in self.mounts:
+ cmd = ("cd %s ; prove -r /root/%s"
+ % (mount_object.mountpoint, self.dir_name))
+ # No assert , known failures with Posix Compliance and glusterfs
+ ret, _, _ = g.run(mount_object.client_system, cmd)
+ if ret != 0:
+ g.log.error("Posix Compliance Suite failed. "
+ "Full Test Summary in Glusto Logs")
+ # Check ganesha cluster status
+ g.log.info("Checking ganesha cluster status")
+ self.assertTrue(is_nfs_ganesha_cluster_in_healthy_state(self.mnode),
+ "Cluster is not healthy after test")
+ g.log.info("Ganesha cluster is healthy after the test with ACL"
+ " disabled")
+
+ def tearDown(self):
+ """
+ Unmount and cleanup volume
+ """
+ # Unmount volume
+ ret = self.unmount_volume(self.mounts)
+ if ret:
+ g.log.info("Successfully unmounted the volume")
else:
- g.log.error("ERROR! Cluster unhealthy after I/O,check for cores!")
- self.assertEqual(ret, 0, "Cluster unhealthy after Kernel untar")
+ g.log.error("Failed to unmount volume")
- @classmethod
- def tearDownClass(cls):
- (NfsGaneshaIOBaseClass.
- tearDownClass.
- im_func(cls,
- teardown_nfsganesha_cluster=False))
+ # Cleanup volume
+ ret = self.cleanup_volume()
+ if not ret:
+ raise ExecutionError("Failed to cleanup volume")
+ g.log.info("Cleanup volume %s completed successfully", self.volname)
+
+ # Cleanup test repo
+ flag = 0
+ for client in self.clients:
+ ret, _, _ = g.run(client, "rm -rf /root/%s" % self.dir_name)
+ if ret:
+ g.log.error("Failed to cleanup test repo on "
+ "client %s" % client)
+ flag = 1
+ else:
+ g.log.info("Test repo successfully cleaned on "
+ "client %s" % client)
+ if flag:
+ raise ExecutionError("Test repo deletion failed. "
+ "Check log errors for more info")
+ else:
+            g.log.info("Test repo cleanup successful on all clients")