Diffstat (limited to 'tests/functional/nfs_ganesha/test_nfs_ganesha_volume_exports.py')
-rwxr-xr-x  tests/functional/nfs_ganesha/test_nfs_ganesha_volume_exports.py  162
1 file changed, 80 insertions(+), 82 deletions(-)
diff --git a/tests/functional/nfs_ganesha/test_nfs_ganesha_volume_exports.py b/tests/functional/nfs_ganesha/test_nfs_ganesha_volume_exports.py
index 63331ec73..bb1f2f71e 100755
--- a/tests/functional/nfs_ganesha/test_nfs_ganesha_volume_exports.py
+++ b/tests/functional/nfs_ganesha/test_nfs_ganesha_volume_exports.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2016-2017 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2016-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -19,14 +19,15 @@
refresh configs, cluster enable/disable functionality.
"""
-from time import sleep
+from copy import deepcopy
import os
import re
-from copy import deepcopy
+from time import sleep
+
from glusto.core import Glusto as g
-from glustolibs.gluster.gluster_base_class import runs_on
+
+from glustolibs.gluster.gluster_base_class import runs_on, GlusterBaseClass
from glustolibs.gluster.nfs_ganesha_libs import (
- NfsGaneshaClusterSetupClass,
wait_for_nfs_ganesha_volume_to_get_exported,
wait_for_nfs_ganesha_volume_to_get_unexported)
from glustolibs.gluster.nfs_ganesha_ops import (
@@ -47,23 +48,11 @@ from glustolibs.gluster.lib_utils import get_servers_unused_bricks_dict
@runs_on([['replicated', 'distributed', 'distributed-replicated',
'dispersed', 'distributed-dispersed'],
['nfs']])
-class TestNfsGaneshaVolumeExports(NfsGaneshaClusterSetupClass):
+class TestNfsGaneshaVolumeExports(GlusterBaseClass):
"""
Tests to verify Nfs Ganesha exports, cluster enable/disable
functionality.
"""
- @classmethod
- def setUpClass(cls):
- """
- Setup nfs-ganesha if not exists.
- """
- NfsGaneshaClusterSetupClass.setUpClass.im_func(cls)
-
- # Setup nfs-ganesha if not exists.
- ret = cls.setup_nfs_ganesha()
- if not ret:
- raise ExecutionError("Failed to setup nfs-ganesha cluster")
- g.log.info("nfs-ganesha cluster is healthy")
def setUp(self):
"""
@@ -170,6 +159,59 @@ class TestNfsGaneshaVolumeExports(NfsGaneshaClusterSetupClass):
"ganesha.enable 'on'" % self.volname)
g.log.info("Exported volume after enabling nfs-ganesha cluster")
+ def test_nfs_ganesha_exportID_after_vol_restart(self):
+ """
+ Test to verify that the nfs-ganesha volume gets exported with the same
+ Export ID after multiple volume restarts.
+ Steps:
+ 1. Create and export the volume
+ 2. Stop and start the volume multiple times
+ 3. Check the export ID after each restart
+ The export ID should not change
+ """
+ for i in range(1, 4):
+ g.log.info("Testing nfs ganesha exportID after volume stop and "
+ "start.\n Count : %s", str(i))
+
+ # Stopping volume
+ ret = volume_stop(self.mnode, self.volname)
+ self.assertTrue(ret, ("Failed to stop volume %s" % self.volname))
+ g.log.info("Volume is stopped")
+
+ # Waiting for few seconds for volume unexport. Max wait time is
+ # 120 seconds.
+ ret = wait_for_nfs_ganesha_volume_to_get_unexported(self.mnode,
+ self.volname)
+ self.assertTrue(ret, ("Failed to unexport volume %s after "
+ "stopping volume" % self.volname))
+ g.log.info("Volume is unexported via ganesha")
+
+ # Starting volume
+ ret = volume_start(self.mnode, self.volname)
+ self.assertTrue(ret, ("Failed to start volume %s" % self.volname))
+ g.log.info("Volume is started")
+
+ # Waiting for few seconds for volume export. Max wait time is
+ # 120 seconds.
+ ret = wait_for_nfs_ganesha_volume_to_get_exported(self.mnode,
+ self.volname)
+ self.assertTrue(ret, ("Failed to export volume %s after "
+ "starting volume" % self.volname))
+ g.log.info("Volume is exported via ganesha")
+
+ # Check for Export ID
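+ # The per-volume export config is generated on the shared storage
+ # volume (typically export.<volname>.conf); this test assumes the
+ # volume's Export_Id is 2 and verifies that it stays the same across
+ # restarts.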
+ cmd = ("cat /run/gluster/shared_storage/nfs-ganesha/exports/"
+ "export.*.conf | grep Export_Id | grep -Eo '[0-9]'")
+ ret, out, _ = g.run(self.mnode, cmd)
+ self.assertEqual(ret, 0, "Unable to get export ID of the volume %s"
+ % self.volname)
+ g.log.info("Successful in getting volume export ID: %s " % out)
+ self.assertEqual(out.strip("\n"), "2",
+ "Export ID changed after export and unexport "
+ "of volume: %s" % out)
+ g.log.info("Export ID of volume is same after export "
+ "and export: %s" % out)
+
def tearDown(self):
"""
Unexport volume
@@ -195,17 +237,11 @@ class TestNfsGaneshaVolumeExports(NfsGaneshaClusterSetupClass):
raise ExecutionError("Failed to cleanup volume")
g.log.info("Cleanup volume %s completed successfully", self.volname)
- @classmethod
- def tearDownClass(cls):
- (NfsGaneshaClusterSetupClass.
- tearDownClass.
- im_func(cls, delete_nfs_ganesha_cluster=False))
-
@runs_on([['replicated', 'distributed', 'distributed-replicated',
'dispersed', 'distributed-dispersed'],
['nfs']])
-class TestNfsGaneshaVolumeExportsWithIO(NfsGaneshaClusterSetupClass):
+class TestNfsGaneshaVolumeExportsWithIO(GlusterBaseClass):
"""
Tests to verify nfs ganesha features when IO is in progress.
"""
@@ -215,22 +251,14 @@ class TestNfsGaneshaVolumeExportsWithIO(NfsGaneshaClusterSetupClass):
Setup nfs-ganesha if not exists.
Upload IO scripts to clients
"""
- NfsGaneshaClusterSetupClass.setUpClass.im_func(cls)
-
- # Setup nfs-ganesha if not exists.
- ret = cls.setup_nfs_ganesha()
- if not ret:
- raise ExecutionError("Failed to setup nfs-ganesha cluster")
- g.log.info("nfs-ganesha cluster is healthy")
+ cls.get_super_method(cls, 'setUpClass')()
# Upload IO scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts to clients %s" %
cls.clients)
@@ -261,13 +289,14 @@ class TestNfsGaneshaVolumeExportsWithIO(NfsGaneshaClusterSetupClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 2 "
"--dir-length 10 "
"--max-num-of-dirs 5 "
- "--num-of-files 5 %s" % (self.script_upload_path, count,
- mount_obj.mountpoint))
+ "--num-of-files 5 %s" % (
+ self.script_upload_path, count,
+ mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
all_mounts_procs.append(proc)
@@ -338,17 +367,11 @@ class TestNfsGaneshaVolumeExportsWithIO(NfsGaneshaClusterSetupClass):
raise ExecutionError("Failed to cleanup volume")
g.log.info("Cleanup volume %s completed successfully", self.volname)
- @classmethod
- def tearDownClass(cls):
- (NfsGaneshaClusterSetupClass.
- tearDownClass.
- im_func(cls, delete_nfs_ganesha_cluster=False))
-
@runs_on([['replicated', 'distributed', 'distributed-replicated',
'dispersed', 'distributed-dispersed'],
['nfs']])
-class TestNfsGaneshaMultiVolumeExportsWithIO(NfsGaneshaClusterSetupClass):
+class TestNfsGaneshaMultiVolumeExportsWithIO(GlusterBaseClass):
"""
Tests to verify multiple volumes get exported when IO is in progress.
"""
@@ -358,22 +381,14 @@ class TestNfsGaneshaMultiVolumeExportsWithIO(NfsGaneshaClusterSetupClass):
Setup nfs-ganesha if not exists.
Upload IO scripts to clients
"""
- NfsGaneshaClusterSetupClass.setUpClass.im_func(cls)
-
- # Setup nfs-ganesha if not exists.
- ret = cls.setup_nfs_ganesha()
- if not ret:
- raise ExecutionError("Failed to setup nfs-ganesha cluster")
- g.log.info("nfs-ganesha cluster is healthy")
+ cls.get_super_method(cls, 'setUpClass')()
# Upload IO scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on "
"mounts", cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts to clients %s" %
cls.clients)
@@ -402,13 +417,14 @@ class TestNfsGaneshaMultiVolumeExportsWithIO(NfsGaneshaClusterSetupClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 2 "
"--dir-length 10 "
"--max-num-of-dirs 5 "
- "--num-of-files 5 %s" % (self.script_upload_path, count,
- mount_obj.mountpoint))
+ "--num-of-files 5 %s" % (
+ self.script_upload_path, count,
+ mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
all_mounts_procs.append(proc)
@@ -517,17 +533,11 @@ class TestNfsGaneshaMultiVolumeExportsWithIO(NfsGaneshaClusterSetupClass):
raise ExecutionError("Failed to cleanup volume")
g.log.info("Cleanup volume %s completed successfully", self.volname)
- @classmethod
- def tearDownClass(cls):
- (NfsGaneshaClusterSetupClass.
- tearDownClass.
- im_func(cls, delete_nfs_ganesha_cluster=False))
-
@runs_on([['replicated', 'distributed', 'distributed-replicated',
'dispersed', 'distributed-dispersed'],
['nfs']])
-class TestNfsGaneshaSubDirExportsWithIO(NfsGaneshaClusterSetupClass):
+class TestNfsGaneshaSubDirExportsWithIO(GlusterBaseClass):
"""
Tests to verify nfs-ganesha sub directory exports.
"""
@@ -537,13 +547,7 @@ class TestNfsGaneshaSubDirExportsWithIO(NfsGaneshaClusterSetupClass):
Setup nfs-ganesha if not exists.
Upload IO scripts to clients
"""
- NfsGaneshaClusterSetupClass.setUpClass.im_func(cls)
-
- # Setup nfs-ganesha if not exists.
- ret = cls.setup_nfs_ganesha()
- if not ret:
- raise ExecutionError("Failed to setup nfs-ganesha cluster")
- g.log.info("nfs-ganesha cluster is healthy")
+ cls.get_super_method(cls, 'setUpClass')()
# Upload IO scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on "
@@ -571,14 +575,14 @@ class TestNfsGaneshaSubDirExportsWithIO(NfsGaneshaClusterSetupClass):
g.log.info("Starting IO on all mounts.")
self.all_mounts_procs = []
for mount_obj in mount_objs:
- cmd = ("python %s create_deep_dirs_with_files "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 2 "
"--dir-length 15 "
"--max-num-of-dirs 5 "
- "--num-of-files 10 %s" % (self.script_upload_path,
- self.dir_start,
- mount_obj.mountpoint))
+ "--num-of-files 10 %s" % (
+ self.script_upload_path,
+ self.dir_start, mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
self.all_mounts_procs.append(proc)
@@ -757,9 +761,3 @@ class TestNfsGaneshaSubDirExportsWithIO(NfsGaneshaClusterSetupClass):
if not ret:
raise ExecutionError("Failed to cleanup volume")
g.log.info("Cleanup volume %s completed successfully", self.volname)
-
- @classmethod
- def tearDownClass(cls):
- (NfsGaneshaClusterSetupClass.
- tearDownClass.
- im_func(cls, delete_nfs_ganesha_cluster=False))