summaryrefslogtreecommitdiffstats
path: root/tests/functional/nfs_ganesha
diff options
context:
space:
mode:
Diffstat (limited to 'tests/functional/nfs_ganesha')
-rw-r--r--tests/functional/nfs_ganesha/README.md (renamed from tests/functional/nfs_ganesha/README)21
-rw-r--r--tests/functional/nfs_ganesha/root-squash/__init__.py0
-rw-r--r--tests/functional/nfs_ganesha/root-squash/test_nfs_ganesha_root_squash.py162
-rw-r--r--tests/functional/nfs_ganesha/root-squash/test_nfs_ganesha_rootsquash_multiple_client.py174
-rw-r--r--tests/functional/nfs_ganesha/root-squash/test_root_squash_with_glusterd_restart.py170
-rwxr-xr-xtests/functional/nfs_ganesha/root-squash/test_root_squash_with_volume_restart.py177
-rw-r--r--tests/functional/nfs_ganesha/test_cthon.py21
-rw-r--r--tests/functional/nfs_ganesha/test_ganesha_add_brick.py33
-rw-r--r--tests/functional/nfs_ganesha/test_ganesha_remove_brick.py140
-rw-r--r--tests/functional/nfs_ganesha/test_ganesha_replace_brick.py34
-rw-r--r--tests/functional/nfs_ganesha/test_new_mount_while_io_in_progress.py33
-rw-r--r--tests/functional/nfs_ganesha/test_new_volume_while_io_in_progress.py34
-rw-r--r--tests/functional/nfs_ganesha/test_nfs_ganesha_acls.py57
-rw-r--r--tests/functional/nfs_ganesha/test_nfs_ganesha_run_io_multiple_clients.py55
-rwxr-xr-xtests/functional/nfs_ganesha/test_nfs_ganesha_sanity.py125
-rwxr-xr-xtests/functional/nfs_ganesha/test_nfs_ganesha_volume_exports.py162
16 files changed, 1147 insertions, 251 deletions
diff --git a/tests/functional/nfs_ganesha/README b/tests/functional/nfs_ganesha/README.md
index 24c26df04..d67544bf8 100644
--- a/tests/functional/nfs_ganesha/README
+++ b/tests/functional/nfs_ganesha/README.md
@@ -1,5 +1,6 @@
-Scope of Testing:
+# NFS Ganesha Tests
+Scope of Testing:
Nfs Ganesha functional tests includes test scripts specific to nfs ganesha
component such as high availability, nfsv4 acls, root squash, locks,
volume exports, subdirectory exports from client and server side, dynamic
@@ -8,14 +9,14 @@ refresh config.
Configs to change in glusto_tests_config.yml file for running the tests:
In cluster_config -> nfs_ganesha section,
- - Set enable: True
- - Give the number of nodes to participate in nfs ganesha cluster in
- integer format.
- - Virtual IPs for each nodes which will be part of nfs ganesha cluster
- in list format.
+- Set enable: True
+- Give the number of nodes to participate in nfs ganesha cluster in
+ integer format.
+- Virtual IPs for each nodes which will be part of nfs ganesha cluster
+ in list format.
In mounts section, for each mount
- - Set protocol to 'nfs'.
- - For v3 mount, set options: 'vers=3'
- - For v4 mount, set options: 'vers=4.0'
- - If 'options' is set to empty string, it takes v3 mount by default.
+- Set protocol to 'nfs'.
+- For v3 mount, set options: 'vers=3'
+- For v4 mount, set options: 'vers=4.0'
+- If 'options' is set to empty string, it takes v3 mount by default.
diff --git a/tests/functional/nfs_ganesha/root-squash/__init__.py b/tests/functional/nfs_ganesha/root-squash/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/functional/nfs_ganesha/root-squash/__init__.py
diff --git a/tests/functional/nfs_ganesha/root-squash/test_nfs_ganesha_root_squash.py b/tests/functional/nfs_ganesha/root-squash/test_nfs_ganesha_root_squash.py
new file mode 100644
index 000000000..1f91b33d0
--- /dev/null
+++ b/tests/functional/nfs_ganesha/root-squash/test_nfs_ganesha_root_squash.py
@@ -0,0 +1,162 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+ Test Cases in this module tests the nfs ganesha version 3 and 4
+ rootsquash functionality cases.
+"""
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import runs_on, GlusterBaseClass
+from glustolibs.gluster.nfs_ganesha_libs import (
+ wait_for_nfs_ganesha_volume_to_get_unexported)
+from glustolibs.io.utils import validate_io_procs, get_mounts_stat
+from glustolibs.gluster.nfs_ganesha_ops import (
+ set_root_squash,
+ unexport_nfs_ganesha_volume)
+
+
+@runs_on([['replicated', 'distributed', 'distributed-replicated',
+ 'dispersed', 'distributed-dispersed'],
+ ['nfs']])
+class TestNfsGaneshaRootSquash(GlusterBaseClass):
+ """
+ Tests to verify Nfs Ganesha v3/v4 rootsquash stability
+ Steps:
+ 1. Create some files and dirs inside mount point
+ 2. Check for owner and group
+ 3. Set permission as 777 for mount point
+ 4. Enable root-squash on volume
+ 5. Create some more files and dirs
+ 6. Check for owner and group for any file
+ 7. Edit file created by root user
+ """
+ def setUp(self):
+ """
+ Setup Volume
+ """
+ self.get_super_method(self, 'setUp')()
+
+ # Setup and mount volume
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to setup and mount volume %s"
+ % self.volname)
+ g.log.info("Successful in setup and mount volume %s", self.volname)
+
+ def test_rootsquash_enable(self):
+ # Start IO on mount point.
+ self.all_mounts_procs = []
+ cmd = ("for i in {1..10}; do touch %s/file$i; done"
+ % self.mounts[0].mountpoint)
+ proc = g.run_async(self.mounts[0].client_system, cmd,
+ user=self.mounts[0].user)
+ self.all_mounts_procs.append(proc)
+
+ # Validate IO
+ ret = validate_io_procs(self.all_mounts_procs, self.mounts)
+ self.assertTrue(ret, "IO failed on some of the clients")
+ g.log.info("IO is successful on all mounts")
+
+ # Get stat of all the files/dirs created.
+ ret = get_mounts_stat(self.mounts)
+ self.assertTrue(ret, "Stat failed on some of the clients")
+ g.log.info("Successfull in getting stats of files/dirs "
+ "from mount point")
+
+ # Check for owner and group of random file
+ for mount_obj in self.mounts:
+ cmd = ("ls -l %s/file5 | awk '{ print $3, $4 }' |sort"
+ % mount_obj.mountpoint)
+ ret, out, err = g.run(mount_obj.client_system, cmd)
+ self.assertFalse(ret, err)
+ self.assertIn("root root", out, "Owner and group is not ROOT")
+ g.log.info("Owner and group of file is ROOT")
+
+ # Set mount point permission to 777
+ for mount_obj in self.mounts:
+ cmd = ("chmod 777 %s" % mount_obj.mountpoint)
+ ret, _, err = g.run(mount_obj.client_system, cmd)
+ self.assertFalse(ret, err)
+ g.log.info("Mount point permission changed to 777")
+
+ # Enable root-squash on volume
+ ret = set_root_squash(self.servers[0], self.volname)
+ self.assertTrue(ret, "Failed to enable root-squash on volume")
+ g.log.info("root-squash is enable on the volume")
+
+ # Start IO on mount point.
+ self.all_mounts_procs = []
+ cmd = ("for i in {1..10}; do touch %s/Squashfile$i; done"
+ % self.mounts[0].mountpoint)
+ proc = g.run_async(self.mounts[0].client_system, cmd,
+ user=self.mounts[0].user)
+ self.all_mounts_procs.append(proc)
+
+ # Validate IO
+ ret = validate_io_procs(self.all_mounts_procs, self.mounts)
+ self.assertTrue(ret, "IO failed on some of the clients")
+ g.log.info("IO is successful on all mounts")
+
+ # Get stat of all the files/dirs created.
+ ret = get_mounts_stat(self.mounts)
+ self.assertTrue(ret, "Stat failed on some of the clients")
+ g.log.info("Successfull in getting stats of files/dirs "
+ "from mount point")
+
+ # Check for owner and group of random file
+ for mount_obj in self.mounts:
+ cmd = ("ls -l %s/Squashfile5 | awk '{print $3, $4}' | sort"
+ % mount_obj.mountpoint)
+ ret, out, err = g.run(mount_obj.client_system, cmd)
+ self.assertFalse(ret, err)
+ self.assertIn("nfsnobody nfsnobody", out,
+ "Owner and group of file is NOT NFSNOBODY")
+ g.log.info("Owner and group of file is NFSNOBODY")
+
+ # Edit file created by root user
+ for mount_obj in self.mounts:
+ cmd = ("echo hello > %s/file10" % mount_obj.mountpoint)
+ ret, _, _ = g.run(mount_obj.client_system, cmd)
+ self.assertEqual(ret, 1, "nfsnobody user editing file created by "
+ "root user should FAIL")
+ g.log.info("nfsnobody user failed to edit file "
+ "created by root user")
+
+ def tearDown(self):
+
+ # Disable root-squash
+ ret = set_root_squash(self.mnode, self.volname, squash=False,
+ do_refresh_config=True)
+ if not ret:
+ raise ExecutionError("Failed to disable root-squash on nfs "
+ "ganesha cluster")
+ g.log.info("root-squash is disabled on volume")
+
+ # Unexport volume
+ unexport_nfs_ganesha_volume(self.mnode, self.volname)
+ ret = wait_for_nfs_ganesha_volume_to_get_unexported(self.mnode,
+ self.volname)
+ if not ret:
+ raise ExecutionError("Volume %s is not unexported." % self.volname)
+ g.log.info("Unexporting of volume is successful")
+
+ # Unmount and cleanup Volume
+ ret = self.unmount_volume_and_cleanup_volume(self.mounts)
+ if ret:
+ g.log.info("Successfull unmount and cleanup of volume")
+ else:
+ raise ExecutionError("Failed to unmount and cleanup volume")
diff --git a/tests/functional/nfs_ganesha/root-squash/test_nfs_ganesha_rootsquash_multiple_client.py b/tests/functional/nfs_ganesha/root-squash/test_nfs_ganesha_rootsquash_multiple_client.py
new file mode 100644
index 000000000..918f4038c
--- /dev/null
+++ b/tests/functional/nfs_ganesha/root-squash/test_nfs_ganesha_rootsquash_multiple_client.py
@@ -0,0 +1,174 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+ Test Cases in this module tests the nfs ganesha version 3 and 4
+ rootsquash functionality cases.
+"""
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import runs_on, GlusterBaseClass
+from glustolibs.gluster.nfs_ganesha_libs import (
+ wait_for_nfs_ganesha_volume_to_get_unexported)
+from glustolibs.io.utils import get_mounts_stat
+from glustolibs.gluster.nfs_ganesha_ops import (
+ set_root_squash,
+ unexport_nfs_ganesha_volume)
+from glustolibs.gluster.lib_utils import (append_string_to_file)
+from glustolibs.gluster.glusterfile import set_file_permissions
+
+
+@runs_on([['replicated', 'distributed', 'distributed-replicated',
+ 'dispersed', 'distributed-dispersed'],
+ ['nfs']])
+class TestNfsGaneshaRootSquash(GlusterBaseClass):
+
+ def setUp(self):
+ """
+ Setup Volume
+ """
+ self.get_super_method(self, 'setUp')()
+
+ # Setup and mount volume
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to setup and mount volume %s"
+ % self.volname)
+ g.log.info("Successful in setup and mount volume %s", self.volname)
+
+ def test_root_squash_enable(self):
+ """
+ Tests to verify Nfs Ganesha rootsquash functionality with multi
+ client
+ Steps:
+ 1. Create some directories on mount point.
+ 2. Create some files inside those directories
+ 3. Set permission as 777 for mount point
+ 4. Enable root-squash on volume
+ 5. Edit file created by root user from client 2
+ It should not allow to edit the file
+ 6. Create some directories on mount point.
+ 7. Create some files inside the directories
+ Files and directories will be created by
+ nfsnobody user
+ 8. Edit the file created in step 7
+ It should allow to edit the file
+ 9. Disable root squash
+ 10. Edit the file created at step 7
+ It should allow to edit the file
+ """
+ # Create Directories on Mount point
+ cmd = ("for i in {1..10}; do mkdir %s/dir$i; done"
+ % self.mounts[0].mountpoint)
+ ret, _, err = g.run(self.mounts[0].client_system, cmd,
+ user=self.mounts[0].user)
+ self.assertEqual(ret, 0, err)
+
+ # Create files inside directories on mount point.
+ cmd = ("for i in {1..10}; do touch %s/dir$i/file$i; done"
+ % self.mounts[0].mountpoint)
+ ret, _, err = g.run(self.mounts[0].client_system, cmd,
+ user=self.mounts[0].user)
+ self.assertEqual(ret, 0, err)
+
+ # Get stat of all the files/dirs created.
+ ret = get_mounts_stat(self.mounts)
+ self.assertTrue(ret, "Stat failed on some of the clients")
+ g.log.info("Successful in getting stats of files/dirs "
+ "from mount point")
+
+ # Set mount point permission to 777
+ ret = set_file_permissions(self.mounts[0].client_system,
+ self.mounts[0].mountpoint, 777)
+ self.assertTrue(ret, "Failed to set permission for directory")
+ g.log.info("Successfully set permissions for directory")
+
+ # Enable root-squash on volume
+ ret = set_root_squash(self.servers[0], self.volname)
+ self.assertTrue(ret, "Failed to enable root-squash on volume")
+ g.log.info("root-squash is enable on the volume")
+
+ # Edit file created by root user from client 2
+ ret = append_string_to_file(self.mounts[1].client_system,
+ "%s/dir5/file5"
+ % self.mounts[1].mountpoint, 'hello')
+ self.assertFalse(ret, "Unexpected:nfsnobody user editing file "
+ "created by root user should FAIL")
+ g.log.info("Successful:nfsnobody user failed to edit file "
+ "created by root user")
+
+ # Create Directories on Mount point
+ cmd = ("for i in {1..10}; do mkdir %s/SquashDir$i; done"
+ % self.mounts[0].mountpoint)
+ ret, _, err = g.run(self.mounts[0].client_system, cmd,
+ user=self.mounts[0].user)
+ self.assertEqual(ret, 0, err)
+
+ # Create files inside directories on mount point
+ cmd = ("for i in {1..10}; do touch %s/SquashDir$i/Squashfile$i;"
+ "done" % self.mounts[0].mountpoint)
+ ret, _, err = g.run(self.mounts[0].client_system, cmd,
+ user=self.mounts[0].user)
+ self.assertEqual(ret, 0, err)
+
+ # Get stat of all the files/dirs created.
+ ret = get_mounts_stat(self.mounts)
+ self.assertTrue(ret, "Stat failed on some of the clients")
+ g.log.info("Successful in getting stats of files/dirs "
+ "from mount point")
+
+ # Edit the file created by nfsnobody user from client 2
+ ret = append_string_to_file(self.mounts[1].client_system,
+ "%s/SquashDir5/Squashfile5"
+ % self.mounts[1].mountpoint,
+ 'hello')
+ self.assertTrue(ret, "Unexpected:nfsnobody user failed to edit "
+ "the file created by nfsnobody user")
+ g.log.info("Successful:nfsnobody user successfully edited the "
+ "file created by nfsnobody user")
+
+ # Disable root-squash
+ ret = set_root_squash(self.servers[0], self.volname, squash=False,
+ do_refresh_config=True)
+ self.assertTrue(ret, "Failed to disable root-squash on volume")
+ g.log.info("root-squash is disabled on the volume")
+
+ # Edit the file created by nfsnobody user from root user
+ ret = append_string_to_file(self.mounts[1].client_system,
+ "%s/SquashDir10/Squashfile10"
+ % self.mounts[1].mountpoint, 'hello')
+ self.assertTrue(ret, "Unexpected:root user failed to edit "
+ "the file created by nfsnobody user")
+ g.log.info("Successful:root user successfully edited the "
+ "file created by nfsnobody user")
+
+ def tearDown(self):
+
+ # Unexport volume
+ unexport_nfs_ganesha_volume(self.mnode, self.volname)
+ ret = wait_for_nfs_ganesha_volume_to_get_unexported(self.mnode,
+ self.volname)
+ if not ret:
+ raise ExecutionError("Failed:Volume %s is not unexported."
+ % self.volname)
+ g.log.info("Unexporting of volume is successful")
+
+ # Unmount and cleanup Volume
+ ret = self.unmount_volume_and_cleanup_volume(self.mounts)
+ if ret:
+ g.log.info("Successful unmount and cleanup of volume")
+ else:
+ raise ExecutionError("Failed to unmount and cleanup volume")
diff --git a/tests/functional/nfs_ganesha/root-squash/test_root_squash_with_glusterd_restart.py b/tests/functional/nfs_ganesha/root-squash/test_root_squash_with_glusterd_restart.py
new file mode 100644
index 000000000..5ed925400
--- /dev/null
+++ b/tests/functional/nfs_ganesha/root-squash/test_root_squash_with_glusterd_restart.py
@@ -0,0 +1,170 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+ Test Cases in this module tests the nfs ganesha version 3 and 4
+ rootsquash functionality cases.
+"""
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import runs_on, GlusterBaseClass
+from glustolibs.gluster.nfs_ganesha_libs import (
+ wait_for_nfs_ganesha_volume_to_get_unexported)
+from glustolibs.io.utils import get_mounts_stat
+from glustolibs.gluster.nfs_ganesha_ops import (
+ set_root_squash,
+ unexport_nfs_ganesha_volume)
+from glustolibs.gluster.gluster_init import (
+ is_glusterd_running, restart_glusterd)
+from glustolibs.gluster.peer_ops import wait_for_peers_to_connect
+from glustolibs.gluster.lib_utils import (append_string_to_file)
+from glustolibs.gluster.glusterfile import set_file_permissions
+
+
+@runs_on([['replicated', 'distributed', 'distributed-replicated',
+ 'dispersed', 'distributed-dispersed'],
+ ['nfs']])
+class TestNfsGaneshaRootSquash(GlusterBaseClass):
+ def setUp(self):
+ """
+ Setup Volume
+ """
+ self.get_super_method(self, 'setUp')()
+
+ # Setup and mount volume
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to setup and mount volume %s"
+ % self.volname)
+ g.log.info("Successful in setup and mount volume %s", self.volname)
+
+ def test_root_squash_enable(self):
+ """
+ Tests to verify Nfs Ganesha rootsquash functionality when glusterd
+ service is restarted
+ Steps:
+ 1. Create some files and dirs inside mount point
+ 2. Set permission as 777 for mount point
+ 3. Enable root-squash on volume
+ 4. Create some more files and dirs
+ 5. Restart glusterd on all the nodes
+ 6. Try to edit file created in step 1
+ It should not allow to edit the file
+ 7. Try to edit the file created in step 5
+ It should allow to edit the file
+ """
+ # Start IO on mount point.
+ cmd = ("for i in {1..10}; do touch %s/file$i; done"
+ % self.mounts[0].mountpoint)
+ ret, _, err = g.run(self.mounts[0].client_system, cmd,
+ user=self.mounts[0].user)
+ self.assertEqual(ret, 0, err)
+
+ # Get stat of all the files/dirs created.
+ ret = get_mounts_stat(self.mounts)
+ self.assertTrue(ret, "Stat failed on some of the clients")
+ g.log.info("Successful in getting stats of files/dirs "
+ "from mount point")
+
+ # Set mount point permission to 777
+ ret = set_file_permissions(self.mounts[0].client_system,
+ self.mounts[0].mountpoint, 777)
+ self.assertTrue(ret, "Failed to set permission for directory")
+ g.log.info("Successfully set permissions for directory")
+
+ # Enable root-squash on volume
+ ret = set_root_squash(self.servers[0], self.volname)
+ self.assertTrue(ret, "Failed to enable root-squash on volume")
+ g.log.info("root-squash is enable on the volume")
+
+ # Start IO on mount point.
+ cmd = ("for i in {1..10}; do touch %s/Squashfile$i; done"
+ % self.mounts[0].mountpoint)
+ ret, _, err = g.run(self.mounts[0].client_system, cmd,
+ user=self.mounts[0].user)
+ self.assertEqual(ret, 0, err)
+
+ # Get stat of all the files/dirs created.
+ ret = get_mounts_stat(self.mounts)
+ self.assertTrue(ret, "Stat failed on some of the clients")
+ g.log.info("Successfull in getting stats of files/dirs "
+ "from mount point")
+
+ # Restart glusterd on all servers
+ ret = restart_glusterd(self.servers)
+ self.assertTrue(ret, ("Failed to restart glusterd on all servers %s",
+ self.servers))
+ g.log.info("Successfully restarted glusterd on all servers %s",
+ self.servers)
+
+ # Check if glusterd is running on all servers
+ ret = is_glusterd_running(self.servers)
+ self.assertEqual(ret, 0, ("Failed:Glusterd is not running on all "
+ "servers %s",
+ self.servers))
+ g.log.info("Glusterd is running on all the servers %s", self.servers)
+
+ # Checking if peer is connected.
+ ret = wait_for_peers_to_connect(self.mnode, self.servers)
+ self.assertTrue(ret, "Failed:Peer is not in connected state.")
+ g.log.info("Peers are in connected state.")
+
+ # Edit file created by root user
+ for mount_obj in self.mounts:
+ ret = append_string_to_file(mount_obj.client_system,
+ "%s/file10" % mount_obj.mountpoint,
+ 'hello')
+ self.assertFalse(ret, "Unexpected:nfsnobody user editing file "
+ "created by root user should FAIL")
+ g.log.info("Successful:nfsnobody user failed to edit file "
+ "created by root user")
+
+ # Edit the file created by nfsnobody user
+ for mount_obj in self.mounts:
+ ret = append_string_to_file(mount_obj.client_system,
+ "%s/Squashfile5"
+ % mount_obj.mountpoint,
+ 'hello')
+ self.assertTrue(ret, "Unexpected:nfsnobody user failed to edit "
+ "the file created by nfsnobody user")
+ g.log.info("Successful:nfsnobody user successfully edited the "
+ "file created by nfsnobody user")
+
+ def tearDown(self):
+
+ # Disable root-squash
+ ret = set_root_squash(self.mnode, self.volname, squash=False,
+ do_refresh_config=True)
+ if not ret:
+ raise ExecutionError("Failed to disable root-squash on nfs "
+ "ganesha cluster")
+ g.log.info("root-squash is disabled on volume")
+
+ # Unexport volume
+ unexport_nfs_ganesha_volume(self.mnode, self.volname)
+ ret = wait_for_nfs_ganesha_volume_to_get_unexported(self.mnode,
+ self.volname)
+ if not ret:
+ raise ExecutionError("Failed:Volume %s is not unexported."
+ % self.volname)
+ g.log.info("Unexporting of volume is successful")
+
+ # Unmount and cleanup Volume
+ ret = self.unmount_volume_and_cleanup_volume(self.mounts)
+ if ret:
+ g.log.info("Successful unmount and cleanup of volume")
+ else:
+ raise ExecutionError("Failed to unmount and cleanup volume")
diff --git a/tests/functional/nfs_ganesha/root-squash/test_root_squash_with_volume_restart.py b/tests/functional/nfs_ganesha/root-squash/test_root_squash_with_volume_restart.py
new file mode 100755
index 000000000..424cda09b
--- /dev/null
+++ b/tests/functional/nfs_ganesha/root-squash/test_root_squash_with_volume_restart.py
@@ -0,0 +1,177 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+ Test Cases in this module tests the nfs ganesha version 3 and 4
+ rootsquash functionality cases.
+"""
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import runs_on, GlusterBaseClass
+from glustolibs.gluster.nfs_ganesha_libs import (
+ wait_for_nfs_ganesha_volume_to_get_unexported,
+ wait_for_nfs_ganesha_volume_to_get_exported)
+from glustolibs.io.utils import get_mounts_stat
+from glustolibs.gluster.nfs_ganesha_ops import (
+ set_root_squash,
+ unexport_nfs_ganesha_volume)
+from glustolibs.gluster.volume_ops import (volume_stop, volume_start)
+from glustolibs.gluster.lib_utils import (append_string_to_file)
+from glustolibs.gluster.glusterfile import set_file_permissions
+
+
+@runs_on([['replicated', 'distributed', 'distributed-replicated',
+ 'dispersed', 'distributed-dispersed'],
+ ['nfs']])
+class TestNfsGaneshaRootSquash(GlusterBaseClass):
+
+ def setUp(self):
+ """
+ Setup Volume
+ """
+ self.get_super_method(self, 'setUp')()
+
+ # Setup and mount volume
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to setup and mount volume %s"
+ % self.volname)
+ g.log.info("Successful in setup and mount volume %s", self.volname)
+
+ def test_root_squash_enable(self):
+ """
+ Tests to verify Nfs Ganesha rootsquash functionality when volume
+ is restarted
+ Steps:
+ 1. Create some files and dirs inside mount point
+ 2. Set permission as 777 for mount point
+ 3. Enable root-squash on volume
+ 4. Create some more files and dirs
+ 5. Restart volume
+ 6. Try to edit file created in step 1
+ It should not allow to edit the file
+ 7. Try to edit the file created in step 5
+ It should allow to edit the file
+ """
+ # Start IO on mount point.
+ cmd = ("for i in {1..10}; do touch %s/file$i; done"
+ % self.mounts[0].mountpoint)
+ ret, _, err = g.run(self.mounts[0].client_system, cmd,
+ user=self.mounts[0].user)
+ self.assertEqual(ret, 0, err)
+
+ # Get stat of all the files/dirs created.
+ ret = get_mounts_stat(self.mounts)
+ self.assertTrue(ret, "Stat failed on some of the clients")
+ g.log.info("Successful in getting stats of files/dirs "
+ "from mount point")
+
+ # Set mount point permission to 777
+ ret = set_file_permissions(self.mounts[0].client_system,
+ self.mounts[0].mountpoint, 777)
+ self.assertTrue(ret, "Failed to set permission for directory")
+ g.log.info("Successfully set permissions for directory")
+
+ # Enable root-squash on volume
+ ret = set_root_squash(self.servers[0], self.volname)
+ self.assertTrue(ret, "Failed to enable root-squash on volume")
+ g.log.info("root-squash is enable on the volume")
+
+ # Start IO on mount point.
+ cmd = ("for i in {1..10}; do touch %s/Squashfile$i; done"
+ % self.mounts[0].mountpoint)
+ ret, _, err = g.run(self.mounts[0].client_system, cmd,
+ user=self.mounts[0].user)
+ self.assertEqual(ret, 0, err)
+
+ # Get stat of all the files/dirs created.
+ ret = get_mounts_stat(self.mounts)
+ self.assertTrue(ret, "Stat failed on some of the clients")
+ g.log.info("Successful in getting stats of files/dirs "
+ "from mount point")
+
+ # Stopping volume
+ ret = volume_stop(self.mnode, self.volname)
+ self.assertTrue(ret, ("Failed to stop volume %s" % self.volname))
+ g.log.info("Successful in stopping volume %s" % self.volname)
+
+ # Waiting for few seconds for volume unexport. Max wait time is
+ # 120 seconds.
+ ret = wait_for_nfs_ganesha_volume_to_get_unexported(self.mnode,
+ self.volname)
+ self.assertTrue(ret, ("Failed to unexport volume %s after "
+ "stopping volume" % self.volname))
+ g.log.info("Volume is unexported successfully")
+
+ # Starting volume
+ ret = volume_start(self.mnode, self.volname)
+ self.assertTrue(ret, ("Failed to start volume %s" % self.volname))
+ g.log.info("Successful in starting volume %s" % self.volname)
+
+ # Waiting for few seconds for volume export. Max wait time is
+ # 120 seconds.
+ ret = wait_for_nfs_ganesha_volume_to_get_exported(self.mnode,
+ self.volname)
+ self.assertTrue(ret, ("Failed to export volume %s after "
+ "starting volume" % self.volname))
+ g.log.info("Volume is exported successfully")
+
+ # Edit file created by root user
+ for mount_obj in self.mounts:
+ ret = append_string_to_file(mount_obj.client_system,
+ "%s/file10" % mount_obj.mountpoint,
+ 'hello')
+ self.assertFalse(ret, "Unexpected:nfsnobody user editing file "
+ "created by root user should FAIL")
+ g.log.info("Successful:nfsnobody user failed to edit file "
+ "created by root user")
+
+ # Edit the file created by nfsnobody user
+ for mount_obj in self.mounts:
+ ret = append_string_to_file(mount_obj.client_system,
+ "%s/Squashfile5"
+ % mount_obj.mountpoint,
+ 'hello')
+ self.assertTrue(ret, "Unexpected:nfsnobody user failed to edit "
+ "the file created by nfsnobody user")
+ g.log.info("Successful:nfsnobody user successfully edited the "
+ "file created by nfsnobody user")
+
+ def tearDown(self):
+
+ # Disable root-squash
+ ret = set_root_squash(self.mnode, self.volname, squash=False,
+ do_refresh_config=True)
+ if not ret:
+ raise ExecutionError("Failed to disable root-squash on nfs "
+ "ganesha cluster")
+ g.log.info("root-squash is disabled on volume")
+
+ # Unexport volume
+ unexport_nfs_ganesha_volume(self.mnode, self.volname)
+ ret = wait_for_nfs_ganesha_volume_to_get_unexported(self.mnode,
+ self.volname)
+ if not ret:
+ raise ExecutionError("Failed:Volume %s is not unexported."
+ % self.volname)
+ g.log.info("Unexporting of volume is successful")
+
+ # Unmount and cleanup Volume
+ ret = self.unmount_volume_and_cleanup_volume(self.mounts)
+ if ret:
+ g.log.info("Successful unmount and cleanup of volume")
+ else:
+ raise ExecutionError("Failed to unmount and cleanup volume")
diff --git a/tests/functional/nfs_ganesha/test_cthon.py b/tests/functional/nfs_ganesha/test_cthon.py
index 5c1c5869d..78232fdac 100644
--- a/tests/functional/nfs_ganesha/test_cthon.py
+++ b/tests/functional/nfs_ganesha/test_cthon.py
@@ -20,8 +20,7 @@
"""
from glusto.core import Glusto as g
-from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
-from glustolibs.gluster.nfs_ganesha_libs import NfsGaneshaClusterSetupClass
+from glustolibs.gluster.gluster_base_class import runs_on, GlusterBaseClass
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.io.utils import run_cthon
from glustolibs.misc.misc_libs import git_clone_and_compile
@@ -30,7 +29,7 @@ from glustolibs.misc.misc_libs import git_clone_and_compile
@runs_on([['replicated', 'distributed', 'distributed-replicated',
'dispersed', 'distributed-dispersed'],
['nfs']])
-class TestCthon(NfsGaneshaClusterSetupClass):
+class TestCthon(GlusterBaseClass):
"""
Cthon test on NFS Ganesha v4.0, v4.1
"""
@@ -40,13 +39,7 @@ class TestCthon(NfsGaneshaClusterSetupClass):
"""
Setup nfs-ganesha if not exists.
"""
- NfsGaneshaClusterSetupClass.setUpClass.im_func(cls)
-
- # Setup nfs-ganesha if not exists.
- ret = cls.setup_nfs_ganesha()
- if not ret:
- raise ExecutionError("Failed to setup nfs-ganesha cluster")
- g.log.info("nfs-ganesha cluster is healthy")
+ cls.get_super_method(cls, 'setUpClass')()
# Cloning the cthon test repo
cls.dir_name = "repo_dir"
@@ -65,7 +58,7 @@ class TestCthon(NfsGaneshaClusterSetupClass):
"""
Setup volume
"""
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
g.log.info("Starting to setup volume %s", self.volname)
ret = self.setup_volume(volume_create_force=True)
@@ -90,7 +83,7 @@ class TestCthon(NfsGaneshaClusterSetupClass):
"""
Cleanup volume
"""
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
# Cleanup volume
ret = self.cleanup_volume()
@@ -115,7 +108,3 @@ class TestCthon(NfsGaneshaClusterSetupClass):
"Check log errors for more info")
else:
g.log.info("Test repo cleanup successfull on all clients")
-
- @classmethod
- def tearDownClass(cls):
- NfsGaneshaClusterSetupClass.tearDownClass.im_func(cls)
diff --git a/tests/functional/nfs_ganesha/test_ganesha_add_brick.py b/tests/functional/nfs_ganesha/test_ganesha_add_brick.py
index d82dc6363..e3fc6adc9 100644
--- a/tests/functional/nfs_ganesha/test_ganesha_add_brick.py
+++ b/tests/functional/nfs_ganesha/test_ganesha_add_brick.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2018-2019 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2018-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -15,8 +15,8 @@
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from glusto.core import Glusto as g
-from glustolibs.gluster.nfs_ganesha_libs import NfsGaneshaClusterSetupClass
-from glustolibs.gluster.gluster_base_class import runs_on
+
+from glustolibs.gluster.gluster_base_class import runs_on, GlusterBaseClass
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.misc.misc_libs import upload_scripts
from glustolibs.io.utils import validate_io_procs, get_mounts_stat
@@ -30,7 +30,7 @@ from glustolibs.gluster.volume_libs import (
@runs_on([['distributed-replicated', 'replicated', 'distributed',
'dispersed', 'distributed-dispersed'],
['nfs']])
-class TestGaneshaAddBrick(NfsGaneshaClusterSetupClass):
+class TestGaneshaAddBrick(GlusterBaseClass):
"""
Test cases to validate add-brick and rebalance functionality on volumes
exported through nfs-ganesha
@@ -42,22 +42,14 @@ class TestGaneshaAddBrick(NfsGaneshaClusterSetupClass):
Setup nfs-ganesha if not exists.
Upload IO scripts to clients
"""
- NfsGaneshaClusterSetupClass.setUpClass.im_func(cls)
-
- # Setup nfs-ganesha if not exists.
- ret = cls.setup_nfs_ganesha()
- if not ret:
- raise ExecutionError("Failed to setup nfs-ganesha cluster")
- g.log.info("nfs-ganesha cluster is healthy")
+ cls.get_super_method(cls, 'setUpClass')()
# Upload IO scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts to clients %s" %
cls.clients)
@@ -90,13 +82,14 @@ class TestGaneshaAddBrick(NfsGaneshaClusterSetupClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 2 "
"--dir-length 10 "
"--max-num-of-dirs 5 "
- "--num-of-files 5 %s" % (self.script_upload_path, count,
- mount_obj.mountpoint))
+ "--num-of-files 5 %s" % (
+ self.script_upload_path, count,
+ mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
all_mounts_procs.append(proc)
@@ -177,9 +170,3 @@ class TestGaneshaAddBrick(NfsGaneshaClusterSetupClass):
if not ret:
raise ExecutionError("Failed to cleanup volume")
g.log.info("Cleanup volume %s completed successfully", self.volname)
-
- @classmethod
- def tearDownClass(cls):
- (NfsGaneshaClusterSetupClass.
- tearDownClass.
- im_func(cls, delete_nfs_ganesha_cluster=False))
diff --git a/tests/functional/nfs_ganesha/test_ganesha_remove_brick.py b/tests/functional/nfs_ganesha/test_ganesha_remove_brick.py
new file mode 100644
index 000000000..9e9cf39c2
--- /dev/null
+++ b/tests/functional/nfs_ganesha/test_ganesha_remove_brick.py
@@ -0,0 +1,140 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import runs_on, GlusterBaseClass
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.misc.misc_libs import upload_scripts
+from glustolibs.io.utils import validate_io_procs, get_mounts_stat
+from glustolibs.gluster.volume_libs import (
+ log_volume_info_and_status, shrink_volume,
+ wait_for_volume_process_to_be_online)
+
+
+@runs_on([['distributed', 'distributed-arbiter',
+ 'distributed-replicated', 'distributed-dispersed'],
+ ['nfs']])
+class TestGaneshaRemoveBrick(GlusterBaseClass):
+ """
+ This test case validates remove brick functionality on volumes exported
+ through nfs-ganesha
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Setup nfs-ganesha if not exists.
+ Upload IO scripts to clients
+ """
+ cls.get_super_method(cls, 'setUpClass')()
+
+ # Upload IO scripts for running IO on mounts
+ cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
+ "file_dir_ops.py")
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
+ if not ret:
+ raise ExecutionError("Failed to upload IO scripts to clients %s" %
+ cls.clients)
+ g.log.info("Successfully uploaded IO scripts to clients %s",
+ cls.clients)
+
+ def setUp(self):
+ """
+ Setup Volume and Mount Volume
+ """
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to setup and mount volume %s"
+ % self.volname)
+ g.log.info("Successful in setup and mount volume %s", self.volname)
+
+ def test_nfs_ganesha_remove_brick(self):
+ """
+ Verify remove brick operation while IO is running
+ Steps:
+ 1. Start IO on mount points
+ 2. Perform remove brick operation
+ 3. Validate IOs
+ """
+ # pylint: disable=too-many-statements
+ # Start IO on all mount points
+ all_mounts_procs, count = [], 1
+ for mount_obj in self.mounts:
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
+ "--dirname-start-num %d "
+ "--dir-depth 2 "
+ "--dir-length 10 "
+ "--max-num-of-dirs 5 "
+ "--num-of-files 5 %s" % (self.script_upload_path, count,
+ mount_obj.mountpoint))
+ proc = g.run_async(mount_obj.client_system, cmd,
+ user=mount_obj.user)
+ all_mounts_procs.append(proc)
+ count += 10
+
+ # Get stat of all the files/dirs created.
+ ret = get_mounts_stat(self.mounts)
+ self.assertTrue(ret, "Stat failed on some of the clients")
+ g.log.info("Successfully got stat of all files/dirs created")
+
+ # Perform remove brick operation
+ ret = shrink_volume(self.mnode, self.volname)
+ self.assertTrue(ret, ("Remove brick operation failed on "
+ "%s", self.volname))
+ g.log.info("Remove brick operation is successful on "
+ "volume %s", self.volname)
+
+ # Wait for volume processes to be online
+ ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
+ self.assertTrue(ret, ("All volume %s processes failed to come up "
+ "online", self.volname))
+ g.log.info("All volume %s processes came up "
+ "online successfully after remove brick operation",
+ self.volname)
+
+ # Log volume info and status after performing remove brick
+ ret = log_volume_info_and_status(self.mnode, self.volname)
+ self.assertTrue(ret, ("Logging volume info and status failed on "
+ "volume %s", self.volname))
+ g.log.info("Successful in logging volume info and status of volume %s",
+ self.volname)
+
+ # Validate IO
+ ret = validate_io_procs(all_mounts_procs, self.mounts)
+ self.assertTrue(ret, "IO failed on some of the clients")
+ g.log.info("Successfully validated all io's")
+
+ # Get stat of all the files/dirs created.
+ ret = get_mounts_stat(self.mounts)
+ self.assertTrue(ret, "Stat failed on some of the clients")
+ g.log.info("Successfully got stat of all files/dirs created")
+
+ def tearDown(self):
+ """
+ Unmount and cleanup volume
+ """
+ # Unmount volume
+ ret = self.unmount_volume(self.mounts)
+ if ret:
+ g.log.info("Successfully unmounted the volume")
+ else:
+ g.log.error("Failed to unmount volume")
+
+ # Cleanup volume
+ ret = self.cleanup_volume()
+ if not ret:
+ raise ExecutionError("Failed to cleanup volume")
+ g.log.info("Cleanup volume %s completed successfully", self.volname)
diff --git a/tests/functional/nfs_ganesha/test_ganesha_replace_brick.py b/tests/functional/nfs_ganesha/test_ganesha_replace_brick.py
index 918ac6e2c..f87fd03f2 100644
--- a/tests/functional/nfs_ganesha/test_ganesha_replace_brick.py
+++ b/tests/functional/nfs_ganesha/test_ganesha_replace_brick.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2018-2019 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2018-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -13,9 +13,10 @@
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
from glusto.core import Glusto as g
-from glustolibs.gluster.nfs_ganesha_libs import NfsGaneshaClusterSetupClass
-from glustolibs.gluster.gluster_base_class import runs_on
+
+from glustolibs.gluster.gluster_base_class import runs_on, GlusterBaseClass
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.misc.misc_libs import upload_scripts
from glustolibs.io.utils import validate_io_procs, get_mounts_stat
@@ -29,7 +30,7 @@ from glustolibs.gluster.heal_libs import monitor_heal_completion
@runs_on([['distributed-replicated', 'replicated',
'dispersed', 'distributed-dispersed'],
['nfs']])
-class TestGaneshaReplaceBrick(NfsGaneshaClusterSetupClass):
+class TestGaneshaReplaceBrick(GlusterBaseClass):
"""
Test cases to validate remove brick functionality on volumes
exported through nfs-ganesha
@@ -41,22 +42,14 @@ class TestGaneshaReplaceBrick(NfsGaneshaClusterSetupClass):
Setup nfs-ganesha if not exists.
Upload IO scripts to clients
"""
- NfsGaneshaClusterSetupClass.setUpClass.im_func(cls)
-
- # Setup nfs-ganesha if not exists.
- ret = cls.setup_nfs_ganesha()
- if not ret:
- raise ExecutionError("Failed to setup nfs-ganesha cluster")
- g.log.info("nfs-ganesha cluster is healthy")
+ cls.get_super_method(cls, 'setUpClass')()
# Upload IO scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts to clients %s" %
cls.clients)
@@ -90,13 +83,14 @@ class TestGaneshaReplaceBrick(NfsGaneshaClusterSetupClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 2 "
"--dir-length 10 "
"--max-num-of-dirs 5 "
- "--num-of-files 5 %s" % (self.script_upload_path, count,
- mount_obj.mountpoint))
+ "--num-of-files 5 %s" % (
+ self.script_upload_path, count,
+ mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
all_mounts_procs.append(proc)
@@ -168,9 +162,3 @@ class TestGaneshaReplaceBrick(NfsGaneshaClusterSetupClass):
if not ret:
raise ExecutionError("Failed to cleanup volume")
g.log.info("Cleanup volume %s completed successfully", self.volname)
-
- @classmethod
- def tearDownClass(cls):
- (NfsGaneshaClusterSetupClass.
- tearDownClass.
- im_func(cls, delete_nfs_ganesha_cluster=False))
diff --git a/tests/functional/nfs_ganesha/test_new_mount_while_io_in_progress.py b/tests/functional/nfs_ganesha/test_new_mount_while_io_in_progress.py
index 77c484ace..798d5b7df 100644
--- a/tests/functional/nfs_ganesha/test_new_mount_while_io_in_progress.py
+++ b/tests/functional/nfs_ganesha/test_new_mount_while_io_in_progress.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2018-2019 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2018-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -15,8 +15,7 @@
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from copy import deepcopy
from glusto.core import Glusto as g
-from glustolibs.gluster.nfs_ganesha_libs import NfsGaneshaClusterSetupClass
-from glustolibs.gluster.gluster_base_class import runs_on
+from glustolibs.gluster.gluster_base_class import runs_on, GlusterBaseClass
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.misc.misc_libs import upload_scripts
from glustolibs.io.utils import validate_io_procs, get_mounts_stat
@@ -24,7 +23,7 @@ from glustolibs.io.utils import validate_io_procs, get_mounts_stat
@runs_on([['replicated', 'distributed', 'distributed-replicated'],
['nfs']])
-class TestMountWhileIoInProgress(NfsGaneshaClusterSetupClass):
+class TestMountWhileIoInProgress(GlusterBaseClass):
"""
Test cases to validate new mount while IO is going on
"""
@@ -35,22 +34,14 @@ class TestMountWhileIoInProgress(NfsGaneshaClusterSetupClass):
Setup nfs-ganesha if not exists.
Upload IO scripts to clients
"""
- NfsGaneshaClusterSetupClass.setUpClass.im_func(cls)
-
- # Setup nfs-ganesha if not exists.
- ret = cls.setup_nfs_ganesha()
- if not ret:
- raise ExecutionError("Failed to setup nfs-ganesha cluster")
- g.log.info("nfs-ganesha cluster is healthy")
+ cls.get_super_method(cls, 'setUpClass')()
# Upload IO scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts to clients %s" %
cls.clients)
@@ -102,14 +93,14 @@ class TestMountWhileIoInProgress(NfsGaneshaClusterSetupClass):
# Start IO
g.log.info("Starting IO on %s:%s", mount_object.client_system,
mount_object.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 2 "
"--dir-length 10 "
"--max-num-of-dirs 5 "
- "--num-of-files 5 %s" % (self.script_upload_path,
- dirname_start_num,
- mount_object.mountpoint))
+ "--num-of-files 5 %s" % (
+ self.script_upload_path,
+ dirname_start_num, mount_object.mountpoint))
proc = g.run_async(mount_object.client_system, cmd,
user=mount_object.user)
all_mounts_procs.append(proc)
@@ -143,9 +134,3 @@ class TestMountWhileIoInProgress(NfsGaneshaClusterSetupClass):
if not ret:
raise ExecutionError("Failed to cleanup volume")
g.log.info("Cleanup volume %s completed successfully", self.volname)
-
- @classmethod
- def tearDownClass(cls):
- (NfsGaneshaClusterSetupClass.
- tearDownClass.
- im_func(cls, delete_nfs_ganesha_cluster=False))
diff --git a/tests/functional/nfs_ganesha/test_new_volume_while_io_in_progress.py b/tests/functional/nfs_ganesha/test_new_volume_while_io_in_progress.py
index c4c39a9d1..e8491ebfb 100644
--- a/tests/functional/nfs_ganesha/test_new_volume_while_io_in_progress.py
+++ b/tests/functional/nfs_ganesha/test_new_volume_while_io_in_progress.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2018-2019 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2018-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -20,9 +20,10 @@
"""
from copy import deepcopy
from glusto.core import Glusto as g
+
from glustolibs.gluster.nfs_ganesha_libs import (
- NfsGaneshaClusterSetupClass, wait_for_nfs_ganesha_volume_to_get_exported)
-from glustolibs.gluster.gluster_base_class import runs_on
+ wait_for_nfs_ganesha_volume_to_get_exported)
+from glustolibs.gluster.gluster_base_class import runs_on, GlusterBaseClass
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.misc.misc_libs import upload_scripts
from glustolibs.io.utils import validate_io_procs, get_mounts_stat
@@ -36,7 +37,7 @@ from glustolibs.gluster.nfs_ganesha_ops import export_nfs_ganesha_volume
@runs_on([['replicated', 'distributed', 'distributed-replicated',
'dispersed', 'distributed-dispersed'],
['nfs']])
-class TestNewVolumeWhileIoInProgress(NfsGaneshaClusterSetupClass):
+class TestNewVolumeWhileIoInProgress(GlusterBaseClass):
"""
Test cases to verify creation, export and mount of new volume while IO is
going on another volume exported through nfs-ganesha.
@@ -47,22 +48,14 @@ class TestNewVolumeWhileIoInProgress(NfsGaneshaClusterSetupClass):
Setup nfs-ganesha if not exists.
Upload IO scripts to clients
"""
- NfsGaneshaClusterSetupClass.setUpClass.im_func(cls)
-
- # Setup nfs-ganesha if not exists.
- ret = cls.setup_nfs_ganesha()
- if not ret:
- raise ExecutionError("Failed to setup nfs-ganesha cluster")
- g.log.info("nfs-ganesha cluster is healthy")
+ cls.get_super_method(cls, 'setUpClass')()
# Upload IO scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts to clients %s" %
cls.clients)
@@ -97,13 +90,14 @@ class TestNewVolumeWhileIoInProgress(NfsGaneshaClusterSetupClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 2 "
"--dir-length 10 "
"--max-num-of-dirs 5 "
- "--num-of-files 5 %s" % (self.script_upload_path, count,
- mount_obj.mountpoint))
+ "--num-of-files 5 %s" % (
+ self.script_upload_path, count,
+ mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
all_mounts_procs.append(proc)
@@ -242,9 +236,3 @@ class TestNewVolumeWhileIoInProgress(NfsGaneshaClusterSetupClass):
if not ret:
raise ExecutionError("Failed to cleanup volume %s", volume)
g.log.info("Volume %s deleted successfully", volume)
-
- @classmethod
- def tearDownClass(cls):
- (NfsGaneshaClusterSetupClass.
- tearDownClass.
- im_func(cls, delete_nfs_ganesha_cluster=False))
diff --git a/tests/functional/nfs_ganesha/test_nfs_ganesha_acls.py b/tests/functional/nfs_ganesha/test_nfs_ganesha_acls.py
index 991481e20..7b0865c0a 100644
--- a/tests/functional/nfs_ganesha/test_nfs_ganesha_acls.py
+++ b/tests/functional/nfs_ganesha/test_nfs_ganesha_acls.py
@@ -19,33 +19,45 @@
ACL functionality.
"""
-from glusto.core import Glusto as g
-from glustolibs.gluster.gluster_base_class import runs_on
-from glustolibs.gluster.nfs_ganesha_libs import NfsGaneshaVolumeBaseClass
-from glustolibs.gluster.nfs_ganesha_ops import set_acl
-from glustolibs.gluster.exceptions import ExecutionError
import time
import re
+from glusto.core import Glusto as g
+from glustolibs.gluster.nfs_ganesha_ops import (
+ set_acl,
+ unexport_nfs_ganesha_volume)
+from glustolibs.gluster.nfs_ganesha_libs import (
+ wait_for_nfs_ganesha_volume_to_get_unexported)
+from glustolibs.gluster.gluster_base_class import runs_on, GlusterBaseClass
+from glustolibs.gluster.exceptions import ExecutionError
@runs_on([['replicated', 'distributed', 'distributed-replicated',
'dispersed', 'distributed-dispersed'],
['nfs']])
-class TestNfsGaneshaAcls(NfsGaneshaVolumeBaseClass):
+class TestNfsGaneshaAcls(GlusterBaseClass):
"""
Tests to verify Nfs Ganesha v4 ACL stability
"""
-
- @classmethod
- def setUpClass(cls):
- NfsGaneshaVolumeBaseClass.setUpClass.im_func(cls)
-
def setUp(self):
- ret = set_acl(self.mnode, self.volname, acl=True,
- do_refresh_config=True)
+ """
+ Setup Volume
+ """
+ self.get_super_method(self, 'setUp')()
+
+ # Setup and mount volume
+        g.log.info("Starting to setup and mount volume %s", self.volname)
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to setup and mount volume %s"
+ % self.volname)
+ g.log.info("Successful in setup and mount volume %s", self.volname)
+
+ # Enable ACL
+ ret = set_acl(self.mnode, self.volname)
if not ret:
raise ExecutionError("Failed to enable ACL on the nfs "
"ganesha cluster")
+ g.log.info("Successfully enabled ACL")
def test_nfsv4_acls(self):
# pylint: disable=too-many-locals
@@ -105,12 +117,23 @@ class TestNfsGaneshaAcls(NfsGaneshaVolumeBaseClass):
"acl test" % dirname)
def tearDown(self):
+
+ # Disable ACL
ret = set_acl(self.mnode, self.volname, acl=False,
do_refresh_config=True)
if not ret:
raise ExecutionError("Failed to disable ACL on nfs "
"ganesha cluster")
-
- @classmethod
- def tearDownClass(cls):
- NfsGaneshaVolumeBaseClass.tearDownClass.im_func(cls)
+ # Unexport volume
+ unexport_nfs_ganesha_volume(self.mnode, self.volname)
+ ret = wait_for_nfs_ganesha_volume_to_get_unexported(self.mnode,
+ self.volname)
+ if not ret:
+ raise ExecutionError("Volume %s is not unexported." % self.volname)
+
+ # Unmount and cleanup Volume
+ ret = self.unmount_volume_and_cleanup_volume(self.mounts)
+ if ret:
+            g.log.info("Successful unmount and cleanup of volume")
+ else:
+ raise ExecutionError("Failed to unmount and cleanup volume")
diff --git a/tests/functional/nfs_ganesha/test_nfs_ganesha_run_io_multiple_clients.py b/tests/functional/nfs_ganesha/test_nfs_ganesha_run_io_multiple_clients.py
index b6a1a4391..0f9c17156 100644
--- a/tests/functional/nfs_ganesha/test_nfs_ganesha_run_io_multiple_clients.py
+++ b/tests/functional/nfs_ganesha/test_nfs_ganesha_run_io_multiple_clients.py
@@ -20,9 +20,8 @@
"""
from glusto.core import Glusto as g
-from glustolibs.gluster.gluster_base_class import runs_on
+from glustolibs.gluster.gluster_base_class import runs_on, GlusterBaseClass
from glustolibs.gluster.exceptions import ExecutionError
-from glustolibs.gluster.nfs_ganesha_libs import NfsGaneshaVolumeBaseClass
from glustolibs.gluster.lib_utils import install_epel
from glustolibs.io.utils import run_bonnie, run_fio, run_mixed_io
@@ -30,19 +29,37 @@ from glustolibs.io.utils import run_bonnie, run_fio, run_mixed_io
@runs_on([['replicated', 'distributed', 'distributed-replicated',
'dispersed', 'distributed-dispersed'],
['nfs']])
-class TestNfsGaneshaWithDifferentIOPatterns(NfsGaneshaVolumeBaseClass):
+class TestNfsGaneshaWithDifferentIOPatterns(GlusterBaseClass):
"""
Tests Nfs Ganesha stability by running different IO Patterns
"""
@classmethod
def setUpClass(cls):
- NfsGaneshaVolumeBaseClass.setUpClass.im_func(cls)
+ """
+ Setup nfs-ganesha if not exists.
+ """
+ cls.get_super_method(cls, 'setUpClass')()
+
+ # Install epel
if not install_epel(cls.clients):
raise ExecutionError("Failed to install epel")
- def test_run_bonnie_from_multiple_clients(self):
+ def setUp(self):
+ """
+ Setup and mount volume
+ """
+ g.log.info("Starting to setup and mount volume %s", self.volname)
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to setup and mount volume %s"
+ % self.volname)
+ g.log.info("Successful in setup and mount volume %s", self.volname)
+ def test_run_bonnie_from_multiple_clients(self):
+ """
+ Run bonnie test
+ """
directory_to_run = []
for mount in self.mounts:
directory_to_run.append(mount.mountpoint)
@@ -56,7 +73,9 @@ class TestNfsGaneshaWithDifferentIOPatterns(NfsGaneshaVolumeBaseClass):
_, _, _ = g.run(self.servers[0], "pcs status")
def test_run_fio_from_multiple_clients(self):
-
+ """
+ Run fio
+ """
directory_to_run = []
for mount in self.mounts:
directory_to_run.append(mount.mountpoint)
@@ -70,7 +89,9 @@ class TestNfsGaneshaWithDifferentIOPatterns(NfsGaneshaVolumeBaseClass):
_, _, _ = g.run(self.servers[0], "pcs status")
def test_run_mixed_io_from_multiple_clients(self):
-
+ """
+ Run multiple IOs
+ """
directory_to_run = []
for mount in self.mounts:
directory_to_run.append(mount.mountpoint)
@@ -79,8 +100,24 @@ class TestNfsGaneshaWithDifferentIOPatterns(NfsGaneshaVolumeBaseClass):
# TODO: parametrizing io_tools and get the inputs from user.
io_tools = ['bonnie', 'fio']
ret = run_mixed_io(self.clients, io_tools, directory_to_run)
- self.assertTrue(ret, ("fio test failed while running tests on %s"
- % self.clients))
+ self.assertTrue(ret, "IO failed on one or more clients.")
# pcs status output
_, _, _ = g.run(self.servers[0], "pcs status")
+
+ def tearDown(self):
+ """
+ Unmount and cleanup volume
+ """
+ # Unmount volume
+ ret = self.unmount_volume(self.mounts)
+ if ret:
+ g.log.info("Successfully unmounted the volume")
+ else:
+ g.log.error("Failed to unmount volume")
+
+ # Cleanup volume
+ ret = self.cleanup_volume()
+ if not ret:
+ raise ExecutionError("Failed to cleanup volume")
+ g.log.info("Cleanup volume %s completed successfully", self.volname)
diff --git a/tests/functional/nfs_ganesha/test_nfs_ganesha_sanity.py b/tests/functional/nfs_ganesha/test_nfs_ganesha_sanity.py
index 5386b03f5..18feef31b 100755
--- a/tests/functional/nfs_ganesha/test_nfs_ganesha_sanity.py
+++ b/tests/functional/nfs_ganesha/test_nfs_ganesha_sanity.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -18,19 +18,22 @@
Test Cases in this module test NFS-Ganesha Sanity.
"""
from glusto.core import Glusto as g
-from glustolibs.gluster.gluster_base_class import runs_on
-from glustolibs.gluster.nfs_ganesha_libs import NfsGaneshaClusterSetupClass
+
+from glustolibs.gluster.gluster_base_class import runs_on, GlusterBaseClass
from glustolibs.gluster.exceptions import ExecutionError
-from glustolibs.misc.misc_libs import upload_scripts
+from glustolibs.misc.misc_libs import (
+ upload_scripts,
+ git_clone_and_compile)
from glustolibs.gluster.nfs_ganesha_ops import (
- is_nfs_ganesha_cluster_in_healthy_state)
+ is_nfs_ganesha_cluster_in_healthy_state,
+ set_acl)
from glustolibs.io.utils import validate_io_procs
@runs_on([['replicated', 'distributed', 'distributed-replicated',
'dispersed', 'distributed-dispersed'],
['nfs']])
-class TestNfsGaneshaSanity(NfsGaneshaClusterSetupClass):
+class TestNfsGaneshaSanity(GlusterBaseClass):
"""
Tests to verify NFS Ganesha Sanity.
"""
@@ -40,28 +43,37 @@ class TestNfsGaneshaSanity(NfsGaneshaClusterSetupClass):
Setup nfs-ganesha if not exists.
Upload IO scripts to clients
"""
- NfsGaneshaClusterSetupClass.setUpClass.im_func(cls)
-
- # Setup nfs-ganesha if not exists.
- ret = cls.setup_nfs_ganesha()
- if not ret:
- raise ExecutionError("Failed to setup nfs-ganesha cluster")
- g.log.info("nfs-ganesha cluster is healthy")
+ cls.get_super_method(cls, 'setUpClass')()
# Upload IO scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts to clients %s" %
cls.clients)
g.log.info("Successfully uploaded IO scripts to clients %s",
cls.clients)
+ # Cloning posix test suite
+ cls.dir_name = "repo_dir"
+ link = "https://github.com/ffilz/ntfs-3g-pjd-fstest.git"
+ ret = git_clone_and_compile(cls.clients, link, cls.dir_name,
+ compile_option=False)
+ if not ret:
+ raise ExecutionError("Failed to clone test repo")
+ g.log.info("Successfully cloned test repo on client")
+        cmd = "cd /root/repo_dir; sed -i 's/ext3/glusterfs/g' tests/conf; make"
+ for client in cls.clients:
+ ret, _, _ = g.run(client, cmd)
+ if ret == 0:
+                g.log.info("Test repo successfully compiled on "
+                           "client %s" % client)
+ else:
+ raise ExecutionError("Failed to compile test repo")
+
def setUp(self):
"""
Setup and mount volume
@@ -84,13 +96,14 @@ class TestNfsGaneshaSanity(NfsGaneshaClusterSetupClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 2 "
"--dir-length 10 "
"--max-num-of-dirs 5 "
- "--num-of-files 5 %s" % (self.script_upload_path, count,
- mount_obj.mountpoint))
+ "--num-of-files 5 %s" % (
+ self.script_upload_path, count,
+ mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
all_mounts_procs.append(proc)
@@ -126,6 +139,59 @@ class TestNfsGaneshaSanity(NfsGaneshaClusterSetupClass):
self.assertTrue(ret, "Nfs Ganesha cluster is not healthy after "
"kernel untar")
+ def test_nfs_ganesha_posix_compliance(self):
+ """
+ Run Posix Compliance Suite with ACL enabled/disabled.
+ """
+ # Run test with ACL enabled
+
+ # Enable ACL.
+ ret = set_acl(self.mnode, self.volname)
+        self.assertTrue(ret, "Failed to enable ACL")
+ g.log.info("ACL successfully enabled")
+ # Run test
+ for mount_object in self.mounts:
+ g.log.info("Running test now")
+ cmd = ("cd %s ; prove -r /root/%s"
+ % (mount_object.mountpoint, self.dir_name))
+ ret, _, _ = g.run(mount_object.client_system, cmd)
+            # Not asserting here, so as to continue with ACL disabled.
+ if ret != 0:
+ g.log.error("Posix Compliance Suite failed")
+ g.log.info("Continuing with ACL disabled")
+
+ # Check ganesha cluster status
+ g.log.info("Checking ganesha cluster status")
+ self.assertTrue(is_nfs_ganesha_cluster_in_healthy_state(self.mnode),
+ "Cluster is not healthy after test")
+ g.log.info("Ganesha cluster is healthy after the test with ACL "
+ "enabled")
+
+ # Now run test with ACL disabled
+
+ # Disable ACL
+ ret = set_acl(self.mnode, self.volname, acl=False,
+ do_refresh_config=True)
+        self.assertTrue(ret, "Failed to disable ACL")
+ g.log.info("ACL successfully disabled")
+
+ # Run test
+ for mount_object in self.mounts:
+ cmd = ("cd %s ; prove -r /root/%s"
+ % (mount_object.mountpoint, self.dir_name))
+ # No assert , known failures with Posix Compliance and glusterfs
+ ret, _, _ = g.run(mount_object.client_system, cmd)
+ if ret != 0:
+ g.log.error("Posix Compliance Suite failed. "
+ "Full Test Summary in Glusto Logs")
+
+ # Check ganesha cluster status
+ g.log.info("Checking ganesha cluster status")
+ self.assertTrue(is_nfs_ganesha_cluster_in_healthy_state(self.mnode),
+ "Cluster is not healthy after test")
+ g.log.info("Ganesha cluster is healthy after the test with ACL"
+ " disabled")
+
def tearDown(self):
"""
Unmount and cleanup volume
@@ -143,8 +209,19 @@ class TestNfsGaneshaSanity(NfsGaneshaClusterSetupClass):
raise ExecutionError("Failed to cleanup volume")
g.log.info("Cleanup volume %s completed successfully", self.volname)
- @classmethod
- def tearDownClass(cls):
- (NfsGaneshaClusterSetupClass.
- tearDownClass.
- im_func(cls, delete_nfs_ganesha_cluster=False))
+ # Cleanup test repo
+ flag = 0
+ for client in self.clients:
+ ret, _, _ = g.run(client, "rm -rf /root/%s" % self.dir_name)
+ if ret:
+ g.log.error("Failed to cleanup test repo on "
+ "client %s" % client)
+ flag = 1
+ else:
+ g.log.info("Test repo successfully cleaned on "
+ "client %s" % client)
+ if flag:
+ raise ExecutionError("Test repo deletion failed. "
+ "Check log errors for more info")
+ else:
+ g.log.info("Test repo cleanup successfull on all clients")
diff --git a/tests/functional/nfs_ganesha/test_nfs_ganesha_volume_exports.py b/tests/functional/nfs_ganesha/test_nfs_ganesha_volume_exports.py
index 63331ec73..bb1f2f71e 100755
--- a/tests/functional/nfs_ganesha/test_nfs_ganesha_volume_exports.py
+++ b/tests/functional/nfs_ganesha/test_nfs_ganesha_volume_exports.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2016-2017 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2016-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -19,14 +19,15 @@
refresh configs, cluster enable/disable functionality.
"""
-from time import sleep
+from copy import deepcopy
import os
import re
-from copy import deepcopy
+from time import sleep
+
from glusto.core import Glusto as g
-from glustolibs.gluster.gluster_base_class import runs_on
+
+from glustolibs.gluster.gluster_base_class import runs_on, GlusterBaseClass
from glustolibs.gluster.nfs_ganesha_libs import (
- NfsGaneshaClusterSetupClass,
wait_for_nfs_ganesha_volume_to_get_exported,
wait_for_nfs_ganesha_volume_to_get_unexported)
from glustolibs.gluster.nfs_ganesha_ops import (
@@ -47,23 +48,11 @@ from glustolibs.gluster.lib_utils import get_servers_unused_bricks_dict
@runs_on([['replicated', 'distributed', 'distributed-replicated',
'dispersed', 'distributed-dispersed'],
['nfs']])
-class TestNfsGaneshaVolumeExports(NfsGaneshaClusterSetupClass):
+class TestNfsGaneshaVolumeExports(GlusterBaseClass):
"""
Tests to verify Nfs Ganesha exports, cluster enable/disable
functionality.
"""
- @classmethod
- def setUpClass(cls):
- """
- Setup nfs-ganesha if not exists.
- """
- NfsGaneshaClusterSetupClass.setUpClass.im_func(cls)
-
- # Setup nfs-ganesha if not exists.
- ret = cls.setup_nfs_ganesha()
- if not ret:
- raise ExecutionError("Failed to setup nfs-ganesha cluster")
- g.log.info("nfs-ganesha cluster is healthy")
def setUp(self):
"""
@@ -170,6 +159,59 @@ class TestNfsGaneshaVolumeExports(NfsGaneshaClusterSetupClass):
"ganesha.enable 'on'" % self.volname)
g.log.info("Exported volume after enabling nfs-ganesha cluster")
+ def test_nfs_ganesha_exportID_after_vol_restart(self):
+ """
+ Tests script to check nfs-ganesha volume gets exported with same
+ Export ID after multiple volume restarts.
+ Steps:
+ 1. Create and Export the Volume
+ 2. Stop and Start the volume multiple times
+ 3. Check for export ID
+ Export ID should not change
+ """
+ for i in range(1, 4):
+ g.log.info("Testing nfs ganesha exportID after volume stop and "
+ "start.\n Count : %s", str(i))
+
+ # Stopping volume
+ ret = volume_stop(self.mnode, self.volname)
+ self.assertTrue(ret, ("Failed to stop volume %s" % self.volname))
+ g.log.info("Volume is stopped")
+
+ # Waiting for few seconds for volume unexport. Max wait time is
+ # 120 seconds.
+ ret = wait_for_nfs_ganesha_volume_to_get_unexported(self.mnode,
+ self.volname)
+ self.assertTrue(ret, ("Failed to unexport volume %s after "
+ "stopping volume" % self.volname))
+ g.log.info("Volume is unexported via ganesha")
+
+ # Starting volume
+ ret = volume_start(self.mnode, self.volname)
+ self.assertTrue(ret, ("Failed to start volume %s" % self.volname))
+ g.log.info("Volume is started")
+
+ # Waiting for few seconds for volume export. Max wait time is
+ # 120 seconds.
+ ret = wait_for_nfs_ganesha_volume_to_get_exported(self.mnode,
+ self.volname)
+ self.assertTrue(ret, ("Failed to export volume %s after "
+ "starting volume" % self.volname))
+ g.log.info("Volume is exported via ganesha")
+
+ # Check for Export ID
+ cmd = ("cat /run/gluster/shared_storage/nfs-ganesha/exports/"
+ "export.*.conf | grep Export_Id | grep -Eo '[0-9]'")
+ ret, out, _ = g.run(self.mnode, cmd)
+ self.assertEqual(ret, 0, "Unable to get export ID of the volume %s"
+ % self.volname)
+ g.log.info("Successful in getting volume export ID: %s " % out)
+ self.assertEqual(out.strip("\n"), "2",
+ "Export ID changed after export and unexport "
+ "of volume: %s" % out)
+ g.log.info("Export ID of volume is same after export "
+ "and export: %s" % out)
+
def tearDown(self):
"""
Unexport volume
@@ -195,17 +237,11 @@ class TestNfsGaneshaVolumeExports(NfsGaneshaClusterSetupClass):
raise ExecutionError("Failed to cleanup volume")
g.log.info("Cleanup volume %s completed successfully", self.volname)
- @classmethod
- def tearDownClass(cls):
- (NfsGaneshaClusterSetupClass.
- tearDownClass.
- im_func(cls, delete_nfs_ganesha_cluster=False))
-
@runs_on([['replicated', 'distributed', 'distributed-replicated',
'dispersed', 'distributed-dispersed'],
['nfs']])
-class TestNfsGaneshaVolumeExportsWithIO(NfsGaneshaClusterSetupClass):
+class TestNfsGaneshaVolumeExportsWithIO(GlusterBaseClass):
"""
Tests to verify nfs ganesha features when IO is in progress.
"""
@@ -215,22 +251,14 @@ class TestNfsGaneshaVolumeExportsWithIO(NfsGaneshaClusterSetupClass):
Setup nfs-ganesha if not exists.
Upload IO scripts to clients
"""
- NfsGaneshaClusterSetupClass.setUpClass.im_func(cls)
-
- # Setup nfs-ganesha if not exists.
- ret = cls.setup_nfs_ganesha()
- if not ret:
- raise ExecutionError("Failed to setup nfs-ganesha cluster")
- g.log.info("nfs-ganesha cluster is healthy")
+ cls.get_super_method(cls, 'setUpClass')()
# Upload IO scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts to clients %s" %
cls.clients)
@@ -261,13 +289,14 @@ class TestNfsGaneshaVolumeExportsWithIO(NfsGaneshaClusterSetupClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 2 "
"--dir-length 10 "
"--max-num-of-dirs 5 "
- "--num-of-files 5 %s" % (self.script_upload_path, count,
- mount_obj.mountpoint))
+ "--num-of-files 5 %s" % (
+ self.script_upload_path, count,
+ mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
all_mounts_procs.append(proc)
@@ -338,17 +367,11 @@ class TestNfsGaneshaVolumeExportsWithIO(NfsGaneshaClusterSetupClass):
raise ExecutionError("Failed to cleanup volume")
g.log.info("Cleanup volume %s completed successfully", self.volname)
- @classmethod
- def tearDownClass(cls):
- (NfsGaneshaClusterSetupClass.
- tearDownClass.
- im_func(cls, delete_nfs_ganesha_cluster=False))
-
@runs_on([['replicated', 'distributed', 'distributed-replicated',
'dispersed', 'distributed-dispersed'],
['nfs']])
-class TestNfsGaneshaMultiVolumeExportsWithIO(NfsGaneshaClusterSetupClass):
+class TestNfsGaneshaMultiVolumeExportsWithIO(GlusterBaseClass):
"""
Tests to verify multiple volumes gets exported when IO is in progress.
"""
@@ -358,22 +381,14 @@ class TestNfsGaneshaMultiVolumeExportsWithIO(NfsGaneshaClusterSetupClass):
Setup nfs-ganesha if not exists.
Upload IO scripts to clients
"""
- NfsGaneshaClusterSetupClass.setUpClass.im_func(cls)
-
- # Setup nfs-ganesha if not exists.
- ret = cls.setup_nfs_ganesha()
- if not ret:
- raise ExecutionError("Failed to setup nfs-ganesha cluster")
- g.log.info("nfs-ganesha cluster is healthy")
+ cls.get_super_method(cls, 'setUpClass')()
# Upload IO scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts to clients %s" %
cls.clients)
@@ -402,13 +417,14 @@ class TestNfsGaneshaMultiVolumeExportsWithIO(NfsGaneshaClusterSetupClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 2 "
"--dir-length 10 "
"--max-num-of-dirs 5 "
- "--num-of-files 5 %s" % (self.script_upload_path, count,
- mount_obj.mountpoint))
+ "--num-of-files 5 %s" % (
+ self.script_upload_path, count,
+ mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
all_mounts_procs.append(proc)
@@ -517,17 +533,11 @@ class TestNfsGaneshaMultiVolumeExportsWithIO(NfsGaneshaClusterSetupClass):
raise ExecutionError("Failed to cleanup volume")
g.log.info("Cleanup volume %s completed successfully", self.volname)
- @classmethod
- def tearDownClass(cls):
- (NfsGaneshaClusterSetupClass.
- tearDownClass.
- im_func(cls, delete_nfs_ganesha_cluster=False))
-
@runs_on([['replicated', 'distributed', 'distributed-replicated',
'dispersed', 'distributed-dispersed'],
['nfs']])
-class TestNfsGaneshaSubDirExportsWithIO(NfsGaneshaClusterSetupClass):
+class TestNfsGaneshaSubDirExportsWithIO(GlusterBaseClass):
"""
Tests to verify nfs-ganesha sub directory exports.
"""
@@ -537,13 +547,7 @@ class TestNfsGaneshaSubDirExportsWithIO(NfsGaneshaClusterSetupClass):
Setup nfs-ganesha if not exists.
Upload IO scripts to clients
"""
- NfsGaneshaClusterSetupClass.setUpClass.im_func(cls)
-
- # Setup nfs-ganesha if not exists.
- ret = cls.setup_nfs_ganesha()
- if not ret:
- raise ExecutionError("Failed to setup nfs-ganesha cluster")
- g.log.info("nfs-ganesha cluster is healthy")
+ cls.get_super_method(cls, 'setUpClass')()
# Upload IO scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on "
@@ -571,14 +575,14 @@ class TestNfsGaneshaSubDirExportsWithIO(NfsGaneshaClusterSetupClass):
g.log.info("Starting IO on all mounts.")
self.all_mounts_procs = []
for mount_obj in mount_objs:
- cmd = ("python %s create_deep_dirs_with_files "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 2 "
"--dir-length 15 "
"--max-num-of-dirs 5 "
- "--num-of-files 10 %s" % (self.script_upload_path,
- self.dir_start,
- mount_obj.mountpoint))
+ "--num-of-files 10 %s" % (
+ self.script_upload_path,
+ self.dir_start, mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
self.all_mounts_procs.append(proc)
@@ -757,9 +761,3 @@ class TestNfsGaneshaSubDirExportsWithIO(NfsGaneshaClusterSetupClass):
if not ret:
raise ExecutionError("Failed to cleanup volume")
g.log.info("Cleanup volume %s completed successfully", self.volname)
-
- @classmethod
- def tearDownClass(cls):
- (NfsGaneshaClusterSetupClass.
- tearDownClass.
- im_func(cls, delete_nfs_ganesha_cluster=False))