summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorArjun Sharma <arjsharm@redhat.com>2019-09-25 18:00:37 +0530
committerArjun Sharma <arjsharm@redhat.com>2019-09-25 18:11:50 +0530
commite95f40c0dff73d846adc6db72350218875a86dd7 (patch)
tree1c8fefa55e48e375c9930d0bd1e07da3669caea9
parent4ce909b825305b2bbc41c666e95e55c4ceceb002 (diff)
Removing redundant classes:
NfsGaneshaVolumeBaseClass and NfsGaneshaIOBaseClass are essentially redundant and no longer needed. Change-Id: I45f95e018daa4bf9575f4e831111f91615085bdc Signed-off-by: Arjun Sharma <arjsharm@redhat.com>
-rw-r--r--glustolibs-gluster/glustolibs/gluster/nfs_ganesha_libs.py248
1 file changed, 1 insertion, 247 deletions
diff --git a/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_libs.py b/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_libs.py
index 20dbe43..2da786c 100644
--- a/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_libs.py
+++ b/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_libs.py
@@ -30,21 +30,11 @@ from glustolibs.gluster.nfs_ganesha_ops import (
is_nfs_ganesha_cluster_in_healthy_state,
teardown_nfs_ganesha_cluster,
create_nfs_ganesha_cluster,
- export_nfs_ganesha_volume,
- unexport_nfs_ganesha_volume,
configure_ports_on_clients,
ganesha_client_firewall_settings)
from glustolibs.gluster.gluster_base_class import GlusterBaseClass
from glustolibs.gluster.exceptions import ExecutionError, ConfigError
-from glustolibs.gluster.peer_ops import peer_probe_servers, peer_status
-from glustolibs.gluster.volume_ops import volume_info, get_volume_info
-from glustolibs.gluster.volume_libs import (setup_volume, cleanup_volume,
- log_volume_info_and_status,
- get_volume_options,
- is_volume_exported)
-from glustolibs.gluster.mount_ops import create_mount_objs
-from glustolibs.io.utils import log_mounts_info, wait_for_io_to_complete
-from glustolibs.misc.misc_libs import upload_scripts
+from glustolibs.gluster.volume_libs import is_volume_exported
class NfsGaneshaClusterSetupClass(GlusterBaseClass):
@@ -217,242 +207,6 @@ class NfsGaneshaClusterSetupClass(GlusterBaseClass):
g.log.info("Skipping teardown nfs-ganesha cluster...")
-class NfsGaneshaVolumeBaseClass(NfsGaneshaClusterSetupClass):
- """Sets up the nfs ganesha cluster, volume for testing purposes.
- """
- @classmethod
- def setUpClass(cls):
- """Setup volume exports volume with nfs-ganesha,
- mounts the volume.
- """
- # pylint: disable=too-many-branches
- NfsGaneshaClusterSetupClass.setUpClass.im_func(cls)
-
- # Peer probe servers
- ret = peer_probe_servers(cls.mnode, cls.servers)
- if not ret:
- raise ExecutionError("Failed to peer probe servers")
-
- g.log.info("All peers are in connected state")
-
- # Peer Status from mnode
- peer_status(cls.mnode)
-
- for server in cls.servers:
- mount_info = [
- {'protocol': 'glusterfs',
- 'mountpoint': '/run/gluster/shared_storage',
- 'server': server,
- 'client': {'host': server},
- 'volname': 'gluster_shared_storage',
- 'options': ''}]
-
- mount_obj = create_mount_objs(mount_info)
- if not mount_obj[0].is_mounted():
- ret = mount_obj[0].mount()
- if not ret:
- raise ExecutionError("Unable to mount volume '%s:%s' "
- "on '%s:%s'"
- % (mount_obj.server_system,
- mount_obj.volname,
- mount_obj.client_system,
- mount_obj.mountpoint))
-
- # Setup Volume
- ret = setup_volume(mnode=cls.mnode,
- all_servers_info=cls.all_servers_info,
- volume_config=cls.volume)
- if not ret:
- raise ExecutionError("Setup volume %s failed", cls.volume)
- time.sleep(10)
-
- # Export volume with nfs ganesha, if it is not exported already
- vol_option = get_volume_options(cls.mnode, cls.volname,
- option='ganesha.enable')
- if vol_option is None:
- raise ExecutionError("Failed to get ganesha.enable volume option "
- "for %s " % cls.volume)
- if vol_option['ganesha.enable'] != 'on':
- ret, _, _ = export_nfs_ganesha_volume(
- mnode=cls.mnode, volname=cls.volname)
- if ret != 0:
- raise ExecutionError("Failed to export volume %s "
- "as NFS export", cls.volname)
- time.sleep(5)
-
- ret = wait_for_nfs_ganesha_volume_to_get_exported(cls.mnode,
- cls.volname)
- if not ret:
- raise ExecutionError("Failed to export volume %s. volume is "
- "not listed in showmount" % cls.volname)
- else:
- g.log.info("Volume %s is exported successfully"
- % cls.volname)
-
- # Log Volume Info and Status
- ret = log_volume_info_and_status(cls.mnode, cls.volname)
- if not ret:
- raise ExecutionError("Logging volume %s info and status failed",
- cls.volname)
-
- # Create Mounts
- _rc = True
- for mount_obj in cls.mounts:
- ret = mount_obj.mount()
- if not ret:
- g.log.error("Unable to mount volume '%s:%s' on '%s:%s'",
- mount_obj.server_system, mount_obj.volname,
- mount_obj.client_system, mount_obj.mountpoint)
- _rc = False
- if not _rc:
- raise ExecutionError("Mounting volume %s on few clients failed",
- cls.volname)
-
- # Get info of mount before the IO
- log_mounts_info(cls.mounts)
-
- @classmethod
- def tearDownClass(cls, umount_vol=True, cleanup_vol=True,
- teardown_nfs_ganesha_cluster=True):
- """Teardown the export, mounts and volume.
- """
- # pylint: disable=too-many-branches
- # Unmount volume
- if umount_vol:
- _rc = True
- for mount_obj in cls.mounts:
- ret = mount_obj.unmount()
- if not ret:
- g.log.error("Unable to unmount volume '%s:%s' on '%s:%s'",
- mount_obj.server_system, mount_obj.volname,
- mount_obj.client_system, mount_obj.mountpoint)
- _rc = False
- if not _rc:
- raise ExecutionError("Unmount of all mounts are not "
- "successful")
-
- # Cleanup volume
- if cleanup_vol:
-
- volinfo = get_volume_info(cls.mnode, cls.volname)
- if volinfo is None or cls.volname not in volinfo:
- g.log.info("Volume %s does not exist in %s"
- % (cls.volname, cls.mnode))
- else:
- # Unexport volume, if it is not unexported already
- vol_option = get_volume_options(cls.mnode, cls.volname,
- option='ganesha.enable')
- if vol_option is None:
- raise ExecutionError("Failed to get ganesha.enable volume "
- " option for %s " % cls.volume)
- if vol_option['ganesha.enable'] != 'off':
- if is_volume_exported(cls.mnode, cls.volname, "nfs"):
- ret, _, _ = unexport_nfs_ganesha_volume(
- mnode=cls.mnode, volname=cls.volname)
- if ret != 0:
- raise ExecutionError("Failed to unexport volume %s"
- % cls.volname)
- time.sleep(5)
- else:
- g.log.info("Volume %s is unexported already"
- % cls.volname)
-
- _, _, _ = g.run(cls.mnode, "showmount -e")
-
- ret = cleanup_volume(mnode=cls.mnode, volname=cls.volname)
- if not ret:
- raise ExecutionError("cleanup volume %s failed", cls.volname)
-
- # All Volume Info
- volume_info(cls.mnode)
-
- (NfsGaneshaClusterSetupClass.
- tearDownClass.
- im_func(cls,
- delete_nfs_ganesha_cluster=teardown_nfs_ganesha_cluster))
-
-
-class NfsGaneshaIOBaseClass(NfsGaneshaVolumeBaseClass):
- """ Nfs Ganesha IO base class to run the tests when IO is in progress """
-
- @classmethod
- def setUpClass(cls):
-
- NfsGaneshaVolumeBaseClass.setUpClass.im_func(cls)
-
- # Upload io scripts for running IO on mounts
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
- cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
- if not ret:
- raise ExecutionError("Failed to upload IO scripts")
-
- cls.counter = 1
-
- def setUp(self):
- """setUp starts the io from all the mounts.
- IO creates deep dirs and files.
- """
-
- NfsGaneshaVolumeBaseClass.setUp.im_func(self)
-
- # Start IO on mounts
- g.log.info("Starting IO on all mounts...")
- self.all_mounts_procs = []
- for mount_obj in self.mounts:
- cmd = ("python %s create_deep_dirs_with_files "
- "--dirname-start-num %d "
- "--dir-depth 2 "
- "--dir-length 15 "
- "--max-num-of-dirs 5 "
- "--num-of-files 10 %s" % (self.script_upload_path,
- self.counter,
- mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- self.all_mounts_procs.append(proc)
- self.counter = self.counter + 10
- self.io_validation_complete = False
-
- # Adding a delay of 15 seconds before test method starts. This
- # is to ensure IO's are in progress and giving some time to fill data
- time.sleep(15)
-
- def tearDown(self):
- """If test method failed before validating IO, tearDown waits for the
- IO's to complete and checks for the IO exit status
- """
-
- # Wait for IO to complete if io validation is not executed in the
- # test method
- if not self.io_validation_complete:
- g.log.info("Wait for IO to complete as IO validation did not "
- "succeed in test method")
- ret = wait_for_io_to_complete(self.all_mounts_procs, self.mounts)
- if not ret:
- raise ExecutionError("IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
- NfsGaneshaVolumeBaseClass.tearDown.im_func(self)
-
- @classmethod
- def tearDownClass(cls, umount_volume=True, cleanup_volume=True,
- teardown_nfsganesha_cluster=True):
- """Cleanup data from mount, cleanup volume and delete nfs ganesha
- cluster.
- """
- # Log Mounts info
- g.log.info("Log mounts info")
- log_mounts_info(cls.mounts)
-
- (NfsGaneshaVolumeBaseClass.
- tearDownClass.
- im_func(cls,
- umount_vol=umount_volume, cleanup_vol=cleanup_volume,
- teardown_nfs_ganesha_cluster=teardown_nfsganesha_cluster))
-
-
def wait_for_nfs_ganesha_volume_to_get_exported(mnode, volname, timeout=120):
"""Waits for the nfs ganesha volume to get exported