Diffstat (limited to 'tests/functional')
-rwxr-xr-x  tests/functional/nfs_ganesha/test_nfs_ganesha_volume_exports.py | 639
1 file changed, 428 insertions(+), 211 deletions(-)
diff --git a/tests/functional/nfs_ganesha/test_nfs_ganesha_volume_exports.py b/tests/functional/nfs_ganesha/test_nfs_ganesha_volume_exports.py
index 65d7bc6ab..63331ec73 100755
--- a/tests/functional/nfs_ganesha/test_nfs_ganesha_volume_exports.py
+++ b/tests/functional/nfs_ganesha/test_nfs_ganesha_volume_exports.py
@@ -19,53 +19,69 @@
refresh configs, cluster enable/disable functionality.
"""
-import time
+from time import sleep
import os
import re
+from copy import deepcopy
from glusto.core import Glusto as g
from glustolibs.gluster.gluster_base_class import runs_on
from glustolibs.gluster.nfs_ganesha_libs import (
- NfsGaneshaVolumeBaseClass,
+ NfsGaneshaClusterSetupClass,
wait_for_nfs_ganesha_volume_to_get_exported,
- wait_for_nfs_ganesha_volume_to_get_unexported,
- NfsGaneshaIOBaseClass)
-from glustolibs.gluster.nfs_ganesha_ops import (enable_acl, disable_acl,
- run_refresh_config,
- enable_nfs_ganesha,
- disable_nfs_ganesha,
- export_nfs_ganesha_volume,
- unexport_nfs_ganesha_volume)
-from glustolibs.gluster.volume_ops import (volume_stop, volume_start,
- get_volume_info)
-from glustolibs.gluster.volume_libs import (get_volume_options, setup_volume,
- cleanup_volume, is_volume_exported,
- log_volume_info_and_status)
-from glustolibs.io.utils import (validate_io_procs,
- list_all_files_and_dirs_mounts,
- wait_for_io_to_complete)
+ wait_for_nfs_ganesha_volume_to_get_unexported)
+from glustolibs.gluster.nfs_ganesha_ops import (
+ is_nfs_ganesha_cluster_in_healthy_state, set_acl, refresh_config,
+ enable_nfs_ganesha, disable_nfs_ganesha, export_nfs_ganesha_volume,
+ unexport_nfs_ganesha_volume)
+from glustolibs.gluster.volume_ops import (volume_stop, volume_start)
+from glustolibs.gluster.volume_libs import (
+ get_volume_options, setup_volume, cleanup_volume,
+ log_volume_info_and_status, wait_for_volume_process_to_be_online,
+ volume_exists)
+from glustolibs.io.utils import validate_io_procs
from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.misc.misc_libs import upload_scripts
+from glustolibs.gluster.lib_utils import get_servers_unused_bricks_dict
@runs_on([['replicated', 'distributed', 'distributed-replicated',
'dispersed', 'distributed-dispersed'],
['nfs']])
-class TestNfsGaneshaVolumeExports(NfsGaneshaVolumeBaseClass):
+class TestNfsGaneshaVolumeExports(NfsGaneshaClusterSetupClass):
"""
Tests to verify Nfs Ganesha exports, cluster enable/disable
functionality.
"""
-
@classmethod
def setUpClass(cls):
- NfsGaneshaVolumeBaseClass.setUpClass.im_func(cls)
+ """
+ Setup nfs-ganesha if not exists.
+ """
+ NfsGaneshaClusterSetupClass.setUpClass.im_func(cls)
+
+ # Setup nfs-ganesha if not exists.
+ ret = cls.setup_nfs_ganesha()
+ if not ret:
+ raise ExecutionError("Failed to setup nfs-ganesha cluster")
+ g.log.info("nfs-ganesha cluster is healthy")
+
+ def setUp(self):
+ """
+ Setup and mount volume
+ """
+ g.log.info("Starting to setup and mount volume %s", self.volname)
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to setup and mount volume %s"
+ % self.volname)
+ g.log.info("Successful in setup and mount volume %s", self.volname)
def test_nfs_ganesha_export_after_vol_restart(self):
"""
Test script to check nfs-ganesha volume gets exported after
multiple volume restarts.
"""
-
- for i in range(5):
+ for i in range(1, 6):
g.log.info("Testing nfs ganesha export after volume stop/start."
"Count : %s", str(i))
@@ -96,15 +112,14 @@ class TestNfsGaneshaVolumeExports(NfsGaneshaVolumeBaseClass):
Test script to check nfs-ganesha volume gets exported after
multiple enable/disable of cluster.
"""
-
- for i in range(5):
+ for i in range(1, 6):
g.log.info("Executing multiple enable/disable of nfs ganesha "
"cluster. Count : %s ", str(i))
ret, _, _ = disable_nfs_ganesha(self.mnode)
- self.assertEqual(ret, 0, ("Failed to disable nfs-ganesha cluster"))
+ self.assertEqual(ret, 0, "Failed to disable nfs-ganesha cluster")
- time.sleep(2)
+ sleep(2)
vol_option = get_volume_options(self.mnode, self.volname,
option='ganesha.enable')
if vol_option is None:
@@ -116,9 +131,20 @@ class TestNfsGaneshaVolumeExports(NfsGaneshaVolumeBaseClass):
"cluster")
ret, _, _ = enable_nfs_ganesha(self.mnode)
- self.assertEqual(ret, 0, ("Failed to enable nfs-ganesha cluster"))
+ self.assertEqual(ret, 0, "Failed to enable nfs-ganesha cluster")
+
+ # Check nfs-ganesha status
+ for itr in range(5):
+ ret = is_nfs_ganesha_cluster_in_healthy_state(self.mnode)
+ if ret:
+ g.log.info("nfs-ganesha cluster is healthy")
+ break
+ g.log.warning("nfs-ganesha cluster is not healthy. "
+ "Iteration: %s", str(itr))
+ self.assertNotEqual(itr, 4, "Wait timeout: nfs-ganesha cluster "
+ "is not healthy")
+ sleep(3)
- time.sleep(2)
vol_option = get_volume_options(self.mnode, self.volname,
option='ganesha.enable')
if vol_option is None:
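For reference, the health check added in the hunk above polls the cluster up to five times before declaring a timeout. The same poll-and-retry pattern, expressed as a standalone helper, might look like the following minimal sketch (the helper name and defaults are hypothetical; only is_nfs_ganesha_cluster_in_healthy_state comes from glustolibs):

    from time import sleep
    from glustolibs.gluster.nfs_ganesha_ops import (
        is_nfs_ganesha_cluster_in_healthy_state)

    def wait_for_healthy_cluster(mnode, attempts=5, interval=3):
        # Poll the cluster health check, sleeping between attempts;
        # give up only after the final attempt fails.
        for _ in range(attempts):
            if is_nfs_ganesha_cluster_in_healthy_state(mnode):
                return True
            sleep(interval)
        return False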
@@ -130,275 +156,462 @@ class TestNfsGaneshaVolumeExports(NfsGaneshaVolumeBaseClass):
"enable of cluster which is unexpected." %
self.volname)
- # Export volume after disable and enable of cluster
- ret, _, _ = export_nfs_ganesha_volume(
- mnode=self.mnode, volname=self.volname)
- self.assertEqual(ret, 0, ("Failed to export volume %s "
- "after disable and enable of cluster"
- % self.volname))
- time.sleep(5)
+ # Export volume after disable and enable of cluster
+ ret, _, _ = export_nfs_ganesha_volume(mnode=self.mnode,
+ volname=self.volname)
+ self.assertEqual(ret, 0, ("Failed to export volume %s "
+ "after disable and enable of cluster"
+ % self.volname))
- # List the volume exports
- _, _, _ = g.run(self.mnode, "showmount -e")
+ # Wait for volume to get exported
+ ret = wait_for_nfs_ganesha_volume_to_get_exported(self.mnode,
+ self.volname)
+ self.assertTrue(ret, "Volume %s is not exported after setting "
+ "ganesha.enable 'on'" % self.volname)
+ g.log.info("Exported volume after enabling nfs-ganesha cluster")
+
+ def tearDown(self):
+ """
+ Unexport volume
+ Unmount and cleanup volume
+ """
+ # Unexport volume
+ unexport_nfs_ganesha_volume(self.mnode, self.volname)
+ ret = wait_for_nfs_ganesha_volume_to_get_unexported(self.mnode,
+ self.volname)
+ if not ret:
+ raise ExecutionError("Volume %s is not unexported" % self.volname)
+
+ # Unmount volume
+ ret = self.unmount_volume(self.mounts)
+ if ret:
+ g.log.info("Successfully unmounted the volume")
+ else:
+ g.log.error("Failed to unmount volume")
+
+ # Cleanup volume
+ ret = self.cleanup_volume()
+ if not ret:
+ raise ExecutionError("Failed to cleanup volume")
+ g.log.info("Cleanup volume %s completed successfully", self.volname)
@classmethod
def tearDownClass(cls):
-
- (NfsGaneshaVolumeBaseClass.
+ (NfsGaneshaClusterSetupClass.
tearDownClass.
- im_func(cls,
- teardown_nfs_ganesha_cluster=False))
+ im_func(cls, delete_nfs_ganesha_cluster=False))
@runs_on([['replicated', 'distributed', 'distributed-replicated',
'dispersed', 'distributed-dispersed'],
['nfs']])
-class TestNfsGaneshaVolumeExportsWithIO(NfsGaneshaIOBaseClass):
+class TestNfsGaneshaVolumeExportsWithIO(NfsGaneshaClusterSetupClass):
"""
- Tests to verfiy nfs ganesha features when IO is in progress.
+ Tests to verify nfs ganesha features when IO is in progress.
"""
+ @classmethod
+ def setUpClass(cls):
+ """
+ Setup nfs-ganesha if not exists.
+ Upload IO scripts to clients
+ """
+ NfsGaneshaClusterSetupClass.setUpClass.im_func(cls)
+
+ # Setup nfs-ganesha if not exists.
+ ret = cls.setup_nfs_ganesha()
+ if not ret:
+ raise ExecutionError("Failed to setup nfs-ganesha cluster")
+ g.log.info("nfs-ganesha cluster is healthy")
+
+ # Upload IO scripts for running IO on mounts
+ g.log.info("Upload io scripts to clients %s for running IO on "
+ "mounts", cls.clients)
+ script_local_path = ("/usr/share/glustolibs/io/scripts/"
+ "file_dir_ops.py")
+ cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
+ "file_dir_ops.py")
+ ret = upload_scripts(cls.clients, script_local_path)
+ if not ret:
+ raise ExecutionError("Failed to upload IO scripts to clients %s" %
+ cls.clients)
+ g.log.info("Successfully uploaded IO scripts to clients %s",
+ cls.clients)
+
+ def setUp(self):
+ """
+ Setup and mount volume
+ """
+ g.log.info("Starting to setup and mount volume %s", self.volname)
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to setup and mount volume %s"
+ % self.volname)
+ g.log.info("Successful in setup and mount volume %s", self.volname)
def test_nfs_ganesha_multiple_refresh_configs(self):
"""
Test script to check nfs-ganesha volume gets exported and IOs
are running after running multiple refresh configs.
"""
-
self.acl_check_flag = False
- for i in range(6):
+ # Starting IO on the mounts
+ all_mounts_procs = []
+ count = 1
+ for mount_obj in self.mounts:
+ g.log.info("Starting IO on %s:%s", mount_obj.client_system,
+ mount_obj.mountpoint)
+ cmd = ("python %s create_deep_dirs_with_files "
+ "--dirname-start-num %d "
+ "--dir-depth 2 "
+ "--dir-length 10 "
+ "--max-num-of-dirs 5 "
+ "--num-of-files 5 %s" % (self.script_upload_path, count,
+ mount_obj.mountpoint))
+ proc = g.run_async(mount_obj.client_system, cmd,
+ user=mount_obj.user)
+ all_mounts_procs.append(proc)
+ count = count + 10
+
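For reference, with count starting at 1 and script_upload_path set in setUpClass, the command assembled above expands to roughly the following on the first mount (the mount point /mnt/nfsmnt is illustrative):

    python /usr/share/glustolibs/io/scripts/file_dir_ops.py \
        create_deep_dirs_with_files --dirname-start-num 1 \
        --dir-depth 2 --dir-length 10 --max-num-of-dirs 5 \
        --num-of-files 5 /mnt/nfsmnt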
+ for itr in range(1, 7):
# Enabling/Disabling ACLs to modify the export configuration
# before running refresh config
- if i % 2 == 0:
- ret = disable_acl(self.mnode, self. volname)
+ if itr % 2 == 0:
+ # Disable ACL
+ ret = set_acl(self.mnode, self.volname, False, False)
self.assertTrue(ret, ("Failed to disable acl on %s"
% self.volname))
self.acl_check_flag = False
else:
- ret = enable_acl(self.mnode, self. volname)
+ # Enable ACL
+ ret = set_acl(self.mnode, self.volname, True, False)
self.assertTrue(ret, ("Failed to enable acl on %s"
% self.volname))
self.acl_check_flag = True
- ret = run_refresh_config(self.mnode, self. volname)
+ ret = refresh_config(self.mnode, self.volname)
self.assertTrue(ret, ("Failed to run refresh config"
"for volume %s" % self.volname))
+ g.log.info("Refresh-config completed. Iteration:%s", str(itr))
- time.sleep(2)
+ # Check nfs-ganesha cluster status
+ ret = is_nfs_ganesha_cluster_in_healthy_state(self.mnode)
+ self.assertTrue(ret, "nfs-ganesha cluster is not healthy after "
+ "multiple refresh-config operations")
# Validate IO
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.io_validation_complete = True
+ g.log.info("Validating IO")
+ ret = validate_io_procs(all_mounts_procs, self.mounts)
self.assertTrue(ret, "IO failed on some of the clients")
-
- # List all files and dirs created
- g.log.info("List all files and directories:")
- ret = list_all_files_and_dirs_mounts(self.mounts)
- self.assertTrue(ret, "Failed to list all files and dirs")
- g.log.info("Listing all files and directories is successful")
+ g.log.info("Successfully validated all IO")
def tearDown(self):
+ """
+ Disable ACL if enabled
+ Unexport volume
+ Unmount and cleanup volume
+ """
if self.acl_check_flag:
- ret = disable_acl(self.mnode, self. volname)
+ ret = set_acl(self.mnode, self.volname, False)
if not ret:
raise ExecutionError("Failed to disable acl on %s"
% self.volname)
self.acl_check_flag = False
- NfsGaneshaIOBaseClass.tearDown.im_func(self)
+ # Unexport volume
+ unexport_nfs_ganesha_volume(self.mnode, self.volname)
+ ret = wait_for_nfs_ganesha_volume_to_get_unexported(self.mnode,
+ self.volname)
+ if not ret:
+ raise ExecutionError("Volume %s is not unexported." % self.volname)
+
+ # Unmount volume
+ ret = self.unmount_volume(self.mounts)
+ if ret:
+ g.log.info("Successfully unmounted the volume")
+ else:
+ g.log.error("Failed to unmount volume")
+
+ # Cleanup volume
+ ret = self.cleanup_volume()
+ if not ret:
+ raise ExecutionError("Failed to cleanup volume")
+ g.log.info("Cleanup volume %s completed successfully", self.volname)
@classmethod
def tearDownClass(cls):
-
- (NfsGaneshaIOBaseClass.
+ (NfsGaneshaClusterSetupClass.
tearDownClass.
- im_func(cls,
- teardown_nfsganesha_cluster=False))
+ im_func(cls, delete_nfs_ganesha_cluster=False))
@runs_on([['replicated', 'distributed', 'distributed-replicated',
'dispersed', 'distributed-dispersed'],
['nfs']])
-class TestNfsGaneshaMultiVolumeExportsWithIO(NfsGaneshaIOBaseClass):
+class TestNfsGaneshaMultiVolumeExportsWithIO(NfsGaneshaClusterSetupClass):
"""
- Tests to verfiy multiple volumes gets exported when IO is in progress.
+ Tests to verify multiple volumes get exported when IO is in progress.
"""
+ @classmethod
+ def setUpClass(cls):
+ """
+ Setup nfs-ganesha if not exists.
+ Upload IO scripts to clients
+ """
+ NfsGaneshaClusterSetupClass.setUpClass.im_func(cls)
+
+ # Setup nfs-ganesha if not exists.
+ ret = cls.setup_nfs_ganesha()
+ if not ret:
+ raise ExecutionError("Failed to setup nfs-ganesha cluster")
+ g.log.info("nfs-ganesha cluster is healthy")
+
+ # Upload IO scripts for running IO on mounts
+ g.log.info("Upload io scripts to clients %s for running IO on "
+ "mounts", cls.clients)
+ script_local_path = ("/usr/share/glustolibs/io/scripts/"
+ "file_dir_ops.py")
+ cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
+ "file_dir_ops.py")
+ ret = upload_scripts(cls.clients, script_local_path)
+ if not ret:
+ raise ExecutionError("Failed to upload IO scripts to clients %s" %
+ cls.clients)
+ g.log.info("Successfully uploaded IO scripts to clients %s",
+ cls.clients)
+
+ def setUp(self):
+ """
+ Setup and mount volume
+ """
+ g.log.info("Starting to setup and mount volume %s", self.volname)
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to setup and mount volume %s"
+ % self.volname)
+ g.log.info("Successful in setup and mount volume %s", self.volname)
def test_nfs_ganesha_export_with_multiple_volumes(self):
"""
- Testcase to verfiy multiple volumes gets exported when IO is in
+ Test case to verify multiple volumes get exported when IO is in
progress.
"""
+ # Starting IO on the mounts
+ all_mounts_procs = []
+ count = 1
+ for mount_obj in self.mounts:
+ g.log.info("Starting IO on %s:%s", mount_obj.client_system,
+ mount_obj.mountpoint)
+ cmd = ("python %s create_deep_dirs_with_files "
+ "--dirname-start-num %d "
+ "--dir-depth 2 "
+ "--dir-length 10 "
+ "--max-num-of-dirs 5 "
+ "--num-of-files 5 %s" % (self.script_upload_path, count,
+ mount_obj.mountpoint))
+ proc = g.run_async(mount_obj.client_system, cmd,
+ user=mount_obj.user)
+ all_mounts_procs.append(proc)
+ count = count + 10
+ # Create and export five new volumes
for i in range(5):
+ # Check availability of bricks to create new volume
+ num_of_unused_bricks = 0
+
+ servers_unused_bricks_dict = get_servers_unused_bricks_dict(
+ self.mnode, self.all_servers, self.all_servers_info)
+ for each_server_unused_bricks_list in list(
+ servers_unused_bricks_dict.values()):
+ num_of_unused_bricks = (num_of_unused_bricks +
+ len(each_server_unused_bricks_list))
+
+ if num_of_unused_bricks < 2:
+ self.assertNotEqual(i, 0, "New volume cannot be created due "
+ "to unavailability of bricks.")
+ g.log.warning("Tried to create five new volumes. But could "
+ "create only %s volume due to unavailability "
+ "of bricks.", str(i))
+ break
+
self.volume['name'] = "nfsvol" + str(i)
self.volume['voltype']['type'] = 'distributed'
self.volume['voltype']['replica_count'] = 1
self.volume['voltype']['dist_count'] = 2
+ new_vol = self.volume['name']
+
# Create volume
ret = setup_volume(mnode=self.mnode,
all_servers_info=self.all_servers_info,
volume_config=self.volume, force=True)
if not ret:
- self.assertTrue(ret, ("Setup volume %s failed" % self.volume))
- time.sleep(5)
+ self.assertTrue(ret, "Setup volume [%s] failed" % self.volume)
- # Export volume with nfs ganesha, if it is not exported already
- vol_option = get_volume_options(self.mnode, self.volume['name'],
- option='ganesha.enable')
- self.assertIsNotNone(vol_option, "Failed to get ganesha.enable "
- "volume option for %s" % self.volume['name'])
- if vol_option['ganesha.enable'] != 'on':
- ret, _, _ = export_nfs_ganesha_volume(
- mnode=self.mnode, volname=self.volume['name'])
- self.assertEqual(ret, 0, "Failed to export volume %s as NFS "
- "export" % self.volume['name'])
- time.sleep(5)
- else:
- g.log.info("Volume %s is exported already",
- self.volume['name'])
+ g.log.info("Wait for volume processes to be online")
+ ret = wait_for_volume_process_to_be_online(self.mnode, new_vol)
+ self.assertTrue(ret, "Volume %s process not online despite "
+ "waiting for 300 seconds" % new_vol)
- # Waiting for few seconds for volume export. Max wait time is
- # 120 seconds.
+ # Export volume with nfs ganesha
+ ret, _, _ = export_nfs_ganesha_volume(mnode=self.mnode,
+ volname=new_vol)
+ self.assertEqual(ret, 0, ("Failed to export volume %s "
+ "using nfs-ganesha" % new_vol))
+
+ # Wait for volume to get exported
ret = wait_for_nfs_ganesha_volume_to_get_exported(self.mnode,
- (self.
- volume['name']))
- self.assertTrue(ret, ("Failed to export volume %s after "
- "starting volume when IO is running on "
- "another volume" % self.volume['name']))
+ new_vol)
+ self.assertTrue(ret, "Volume %s is not exported after setting "
+ "ganesha.enable 'on'" % new_vol)
+ g.log.info("Exported nfs-ganesha volume %s", new_vol)
# Log Volume Info and Status
- ret = log_volume_info_and_status(self.mnode, self.volume['name'])
+ ret = log_volume_info_and_status(self.mnode, new_vol)
self.assertTrue(ret, "Logging volume %s info and status failed"
- % self.volume['name'])
+ % new_vol)
# Validate IO
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.io_validation_complete = True
+ g.log.info("Validating IO")
+ ret = validate_io_procs(all_mounts_procs, self.mounts)
self.assertTrue(ret, "IO failed on some of the clients")
-
- # List all files and dirs created
- g.log.info("List all files and directories:")
- ret = list_all_files_and_dirs_mounts(self.mounts)
- self.assertTrue(ret, "Failed to list all files and dirs")
- g.log.info("Listing all files and directories is successful")
+ g.log.info("Successfully validated all IO")
def tearDown(self):
-
- # Clean up the volumes created specific for this tests.
+ """
+ Unexport volumes
+ Unmount and cleanup volumes
+ """
+ # Cleanup volumes created in test case
for i in range(5):
- volname = "nfsvol" + str(i)
- volinfo = get_volume_info(self.mnode, volname)
- if volinfo is None or volname not in volinfo:
- g.log.info("Volume %s does not exist in %s",
- volname, self.mnode)
- continue
-
- # Unexport volume, if it is not unexported already
- vol_option = get_volume_options(self.mnode, volname,
- option='ganesha.enable')
- if vol_option is None:
- raise ExecutionError("Failed to get ganesha.enable volume "
- " option for %s " % volname)
- if vol_option['ganesha.enable'] != 'off':
- if is_volume_exported(self.mnode, volname, "nfs"):
- ret, _, _ = unexport_nfs_ganesha_volume(
- mnode=self.mnode, volname=volname)
- if ret != 0:
- raise ExecutionError("Failed to unexport volume %s "
- % volname)
- time.sleep(5)
- else:
- g.log.info("Volume %s is unexported already", volname)
-
- _, _, _ = g.run(self.mnode, "showmount -e")
+ vol = "nfsvol" + str(i)
+ # Unexport and cleanup volume if exists
+ if volume_exists(self.mnode, vol):
+ unexport_nfs_ganesha_volume(self.mnode, vol)
+ ret = wait_for_nfs_ganesha_volume_to_get_unexported(
+ self.mnode, vol)
+ if not ret:
+ raise ExecutionError("Volume %s is not unexported." % vol)
+
+ ret = cleanup_volume(mnode=self.mnode, volname=vol)
+ if not ret:
+ raise ExecutionError("Cleanup volume %s failed" % vol)
+
+ # Unexport pre-existing volume
+ unexport_nfs_ganesha_volume(self.mnode, self.volname)
+ ret = wait_for_nfs_ganesha_volume_to_get_unexported(self.mnode,
+ self.volname)
+ if not ret:
+ raise ExecutionError("Volume %s is not unexported." % self.volname)
- ret = cleanup_volume(mnode=self.mnode, volname=volname)
- if not ret:
- raise ExecutionError("cleanup volume %s failed" % volname)
+ # Unmount pre-existing volume
+ ret = self.unmount_volume(self.mounts)
+ if ret:
+ g.log.info("Successfully unmounted the volume")
+ else:
+ g.log.error("Failed to unmount volume")
- NfsGaneshaIOBaseClass.tearDown.im_func(self)
+ # Cleanup pre-existing volume
+ ret = self.cleanup_volume()
+ if not ret:
+ raise ExecutionError("Failed to cleanup volume")
+ g.log.info("Cleanup volume %s completed successfully", self.volname)
@classmethod
def tearDownClass(cls):
-
- (NfsGaneshaIOBaseClass.
+ (NfsGaneshaClusterSetupClass.
tearDownClass.
- im_func(cls,
- teardown_nfsganesha_cluster=False))
+ im_func(cls, delete_nfs_ganesha_cluster=False))
@runs_on([['replicated', 'distributed', 'distributed-replicated',
'dispersed', 'distributed-dispersed'],
['nfs']])
-class TestNfsGaneshaSubDirExportsWithIO(NfsGaneshaIOBaseClass):
+class TestNfsGaneshaSubDirExportsWithIO(NfsGaneshaClusterSetupClass):
"""
- Tests to verfiy nfs ganesha sub directory exports.
+ Tests to verify nfs-ganesha sub directory exports.
"""
-
- def start_and_wait_for_io_to_complete(self):
- """This module starts IO from clients and waits for io to complate.
- Returns True, if io gets completed successfully. Otherwise, False
+ @classmethod
+ def setUpClass(cls):
+ """
+ Setup nfs-ganesha if not exists.
+ Upload IO scripts to clients
"""
+ NfsGaneshaClusterSetupClass.setUpClass.im_func(cls)
+ # Setup nfs-ganesha if not exists.
+ ret = cls.setup_nfs_ganesha()
+ if not ret:
+ raise ExecutionError("Failed to setup nfs-ganesha cluster")
+ g.log.info("nfs-ganesha cluster is healthy")
+
+ # Upload IO scripts for running IO on mounts
+ g.log.info("Upload io scripts to clients %s for running IO on "
+ "mounts", cls.clients)
+ script_local_path = ("/usr/share/glustolibs/io/scripts/"
+ "file_dir_ops.py")
+ cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
+ "file_dir_ops.py")
+ ret = upload_scripts(cls.clients, script_local_path)
+ if not ret:
+ raise ExecutionError("Failed to upload IO scripts to clients %s" %
+ cls.clients)
+ g.log.info("Successfully uploaded IO scripts to clients %s",
+ cls.clients)
+
+ def start_and_wait_for_io_to_complete(self, mount_objs):
+ """
+ This method starts IO on clients and waits for the IO to complete.
+ Args:
+ mount_objs (list): List of mounts
+ Returns:
+ True if IO succeeds, False otherwise.
+ """
# Start IO on mounts
- g.log.info("Starting IO on all mounts...")
+ g.log.info("Starting IO on all mounts.")
self.all_mounts_procs = []
- for mount_obj in self.mounts:
+ for mount_obj in mount_objs:
cmd = ("python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 2 "
"--dir-length 15 "
"--max-num-of-dirs 5 "
"--num-of-files 10 %s" % (self.script_upload_path,
- self.counter,
+ self.dir_start,
mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
self.all_mounts_procs.append(proc)
- self.counter = self.counter + 10
- self.io_validation_complete = False
+ self.dir_start += 10
- # Adding a delay of 15 seconds before test method starts. This
- # is to ensure IO's are in progress and giving some time to fill data
- time.sleep(15)
+ # Add a delay of 15 seconds, giving IO some time to fill data
+ sleep(15)
# Validate IO
ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.io_validation_complete = True
if not ret:
g.log.error("IO failed on some of the clients")
return False
-
- # List all files and dirs created
- g.log.info("List all files and directories:")
- ret = list_all_files_and_dirs_mounts(self.mounts)
- if not ret:
- g.log.error("Failed to list all files and dirs")
- return False
- g.log.info("Listing all files and directories is successful")
return True
def setUp(self):
- """setUp writes data from all mounts and selects subdirectory
- required for the test and unmount the existing mounts.
"""
+ Setup and mount volume
+ Write data on mounts, select the sub-directory required for the test,
+ and unmount the existing mounts.
+ """
+ # Counter for directory start number
+ self.dir_start = 1
- NfsGaneshaIOBaseClass.setUp.im_func(self)
-
- # Validate IO
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
- self.io_validation_complete = True
+ g.log.info("Starting to setup and mount volume %s", self.volname)
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
if not ret:
- raise ExecutionError("IO failed on some of the clients")
+ raise ExecutionError("Failed to setup and mount volume %s"
+ % self.volname)
+ g.log.info("Successful in setup and mount volume %s", self.volname)
- # List all files and dirs created
- g.log.info("List all files and directories:")
- ret = list_all_files_and_dirs_mounts(self.mounts)
- if not ret:
- raise ExecutionError("Failed to list all files and dirs")
- g.log.info("Listing all files and directories is successful")
+ ret = self.start_and_wait_for_io_to_complete(self.mounts)
+ self.assertTrue(ret, "IO failed on one or more clients")
mountpoint = self.mounts[0].mountpoint
client = self.mounts[0].client_system
@@ -419,16 +632,17 @@ class TestNfsGaneshaSubDirExportsWithIO(NfsGaneshaIOBaseClass):
mount_obj.client_system, mount_obj.mountpoint)
_rc = False
if not _rc:
- raise ExecutionError("Unmount of all mounts are not "
- "successful")
+ raise ExecutionError("Unmount of all mounts are not successful")
+
+ # Mount objects for sub-directory mount
+ self.sub_dir_mounts = deepcopy(self.mounts)
def test_nfs_ganesha_subdirectory_mount_from_client_side(self):
"""
- Tests script to verify nfs ganesha subdirectory mount from client side
+ Test script to verify nfs-ganesha subdirectory mount from client side
succeeds and able to write IOs.
"""
-
- for mount_obj in self.mounts:
+ for mount_obj in self.sub_dir_mounts:
subdir_to_mount = self.subdir_path.replace(mount_obj.mountpoint,
'')
if not subdir_to_mount.startswith(os.path.sep):
@@ -444,9 +658,10 @@ class TestNfsGaneshaSubDirExportsWithIO(NfsGaneshaIOBaseClass):
mount_obj.client_system,
mount_obj.mountpoint)))
- ret = self.start_and_wait_for_io_to_complete()
+ ret = self.start_and_wait_for_io_to_complete(self.sub_dir_mounts)
self.assertTrue(ret, ("Failed to write IOs when sub directory is"
" mounted from client side"))
+ g.log.info("IO successful on all clients")
def test_nfs_ganesha_subdirectory_mount_from_server_side(self):
"""
@@ -458,31 +673,30 @@ class TestNfsGaneshaSubDirExportsWithIO(NfsGaneshaIOBaseClass):
if not subdir_to_mount.startswith(os.path.sep):
subdir_to_mount = os.path.sep + subdir_to_mount
- for mount_obj in self.mounts:
- mount_obj.volname = mount_obj.volname + subdir_to_mount
+ subdir = self.volname + subdir_to_mount
+
+ for mount_obj in self.sub_dir_mounts:
+ mount_obj.volname = subdir
export_file = ("/var/run/gluster/shared_storage/nfs-ganesha/exports/"
"export.%s.conf" % self.volname)
cmd = (r"sed -i s/'Path = .*'/'Path = \"\/%s\";'/g %s"
- % (re.escape(self.mounts[0].volname), export_file))
+ % (re.escape(subdir), export_file))
ret, _, _ = g.run(self.mnode, cmd)
self.assertEqual(ret, 0, ("Unable to change Path info to %s in %s"
- % ("/" + self.mounts[0].volname,
- export_file)))
+ % ("/" + subdir, export_file)))
cmd = ("sed -i 's/volume=.*/& \\n volpath=\"%s\";/g' %s"
% (re.escape(subdir_to_mount), export_file))
ret, _, _ = g.run(self.mnode, cmd)
self.assertEqual(ret, 0, ("Unable to add volpath info to %s in %s"
- % ("/" + self.mounts[0].volname,
- export_file)))
+ % ("/" + subdir, export_file)))
cmd = (r"sed -i s/'Pseudo=.*'/'Pseudo=\"\/%s\";'/g %s"
- % (re.escape(self.mounts[0].volname), export_file))
+ % (re.escape(subdir), export_file))
ret, _, _ = g.run(self.mnode, cmd)
self.assertEqual(ret, 0, ("Unable to change pseudo Path info to "
- "%s in %s" % ("/" + self.mounts[0].volname,
- export_file)))
+ "%s in %s" % ("/" + subdir, export_file)))
# Stop and start volume for the modified export file to take effect.
# Stopping volume
@@ -502,13 +716,11 @@ class TestNfsGaneshaSubDirExportsWithIO(NfsGaneshaIOBaseClass):
# Waiting for few seconds for volume export. Max wait time is
# 120 seconds.
- ret = wait_for_nfs_ganesha_volume_to_get_exported(self.mnode,
- (self.mounts[0].
- volname))
+ ret = wait_for_nfs_ganesha_volume_to_get_exported(self.mnode, subdir)
self.assertTrue(ret, ("Failed to export sub directory %s after "
- "starting volume" % self.mounts[0].volname))
+ "starting volume" % subdir))
- for mount_obj in self.mounts:
+ for mount_obj in self.sub_dir_mounts:
if not mount_obj.is_mounted():
ret = mount_obj.mount()
self.assertTrue(ret, ("Unable to mount volume '%s:%s' "
@@ -518,31 +730,36 @@ class TestNfsGaneshaSubDirExportsWithIO(NfsGaneshaIOBaseClass):
mount_obj.client_system,
mount_obj.mountpoint)))
- ret = self.start_and_wait_for_io_to_complete()
+ ret = self.start_and_wait_for_io_to_complete(self.sub_dir_mounts)
self.assertTrue(ret, ("Failed to write IOs when sub directory is"
" mounted from server side"))
+ g.log.info("IO successful on clients")
def tearDown(self):
- """setUp starts the io from all the mounts.
- IO creates deep dirs and files.
"""
+ Unexport volume
+ Unmount and cleanup volume
+ """
+ # Unexport volume
+ ret, _, _ = unexport_nfs_ganesha_volume(self.mnode, self.volname)
+ if ret != 0:
+ raise ExecutionError("Failed to unexport volume %s" % self.volname)
- # Wait for IO to complete if io validation is not executed in the
- # test method
- if not self.io_validation_complete:
- g.log.info("Wait for IO to complete as IO validation did not "
- "succeed in test method")
- ret = wait_for_io_to_complete(self.all_mounts_procs, self.mounts)
- if not ret:
- raise ExecutionError("IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
+ # Unmount volume
+ ret = self.unmount_volume(self.sub_dir_mounts)
+ if ret:
+ g.log.info("Successfully unmounted the volume")
+ else:
+ g.log.error("Failed to unmount volume")
- NfsGaneshaIOBaseClass.tearDown.im_func(self)
+ # Cleanup volume
+ ret = self.cleanup_volume()
+ if not ret:
+ raise ExecutionError("Failed to cleanup volume")
+ g.log.info("Cleanup volume %s completed successfully", self.volname)
@classmethod
def tearDownClass(cls):
-
- (NfsGaneshaIOBaseClass.
+ (NfsGaneshaClusterSetupClass.
tearDownClass.
- im_func(cls,
- teardown_nfsganesha_cluster=False))
+ im_func(cls, delete_nfs_ganesha_cluster=False))
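As a usage note, tests in this file are typically driven through the glusto CLI with a cluster configuration file, along the lines of the following (the config filename is illustrative):

    glusto -c 'config.yml' \
        --pytest='-v tests/functional/nfs_ganesha/test_nfs_ganesha_volume_exports.py'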